Add model
Browse files
- README.md +72 -0
- config.json +52 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- vocab.json +0 -0
README.md
CHANGED
---
license: apache-2.0
---

# OFA-huge-caption

This is the **huge** version of the OFA pretrained model, fine-tuned on the COCO captioning task, forked and converted from the [original fairseq version](https://ofa-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/caption_huge_best.pt) and compressed to float16.

The conversion script is custom, but the procedure described in [Issue #171](https://github.com/OFA-Sys/OFA/issues/171) should also apply (quantization is not performed here, though adding it would be trivial).
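
As a rough illustration, the float16 step on its own is just a cast over the state dict. Below is a minimal sketch assuming the weights are already in the transformers layout; the file names are placeholders, not the actual script:

```python
# Minimal sketch of the float16 compression step. Assumes
# "pytorch_model_fp32.bin" already uses the transformers key layout
# (see Issue #171 for the fairseq -> transformers conversion itself).
import torch

state_dict = torch.load("pytorch_model_fp32.bin", map_location="cpu")
state_dict = {
    name: t.half() if t.is_floating_point() else t
    for name, t in state_dict.items()
}
torch.save(state_dict, "pytorch_model.bin")
```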

You will need the [OFA-modified version of transformers](https://github.com/OFA-Sys/OFA/tree/feature/add_transformers) to use this model; it has still not been merged into master. Tip: you can copy the `transformers` folder into your project, rename it, and monkey-patch the `transformers` module to point at your local copy (as sketched below) to avoid having to install it.
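
A minimal sketch of that monkey-patch, assuming the fork's `transformers` folder was copied into your project and renamed `ofa_transformers` (the name is a placeholder). Run it before anything else imports `transformers`:

```python
import sys

# Renamed copy of the fork's `transformers` folder inside your project
# (placeholder name).
import ofa_transformers

# Make `import transformers` resolve to the local copy instead of any
# pip-installed transformers.
sys.modules["transformers"] = ofa_transformers

from transformers import OFATokenizer, OFAModel  # now comes from the fork
```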

## Original README below

## Introduction

This is the **huge** version of the OFA pretrained model. OFA is a unified multimodal pretrained model that unifies modalities (i.e., cross-modality, vision, language) and tasks (e.g., image generation, visual grounding, image captioning, image classification, text generation, etc.) into a simple sequence-to-sequence learning framework.

The directory includes four files: `config.json` (the model configuration), `vocab.json` and `merges.txt` (the OFA tokenizer), and `pytorch_model.bin` (the model weights). There is no need to worry about a mismatch between fairseq and transformers, since we have already addressed the issue.

## How to use

To use it in transformers, please refer to <https://github.com/OFA-Sys/OFA/tree/feature/add_transformers>. Install the modified transformers and download the model as shown below.

```bash
git clone --single-branch --branch feature/add_transformers https://github.com/OFA-Sys/OFA.git
pip install OFA/transformers/
git clone https://huggingface.co/OFA-Sys/OFA-huge
```

Afterwards, point `ckpt_dir` at the downloaded OFA-huge directory and prepare an image for the test example below. Also, ensure that you have Pillow and torchvision in your environment.

```python
>>> import torch
>>> from PIL import Image
>>> from torchvision import transforms
>>> from transformers import OFATokenizer, OFAModel
>>> from generate import sequence_generator

>>> mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
>>> resolution = 480
>>> patch_resize_transform = transforms.Compose([
...     lambda image: image.convert("RGB"),
...     transforms.Resize((resolution, resolution), interpolation=Image.BICUBIC),
...     transforms.ToTensor(),
...     transforms.Normalize(mean=mean, std=std)
... ])

>>> tokenizer = OFATokenizer.from_pretrained(ckpt_dir)

>>> txt = " what does the image describe?"
>>> inputs = tokenizer([txt], return_tensors="pt").input_ids
>>> img = Image.open(path_to_image)
>>> patch_img = patch_resize_transform(img).unsqueeze(0)

# using the generator of the fairseq version
>>> model = OFAModel.from_pretrained(ckpt_dir, use_cache=True)
>>> generator = sequence_generator.SequenceGenerator(
...     tokenizer=tokenizer,
...     beam_size=5,
...     max_len_b=16,
...     min_len=0,
...     no_repeat_ngram_size=3,
... )
>>> data = {}
>>> data["net_input"] = {"input_ids": inputs, "patch_images": patch_img, "patch_masks": torch.tensor([True])}
>>> gen_output = generator.generate([model], data)
>>> gen = [gen_output[i][0]["tokens"] for i in range(len(gen_output))]

# using the generator of the huggingface version
>>> model = OFAModel.from_pretrained(ckpt_dir, use_cache=False)
>>> gen = model.generate(inputs, patch_images=patch_img, num_beams=5, no_repeat_ngram_size=3)

>>> print(tokenizer.batch_decode(gen, skip_special_tokens=True))
```
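
Since the checkpoint in this repository is stored in float16, you can also keep the weights in half precision at load time. This relies on the standard `torch_dtype` argument of `from_pretrained`, which the OFA fork should inherit from upstream transformers (an assumption, not something this README states):

```python
>>> # Assumes the fork inherits transformers' standard `torch_dtype` argument.
>>> model = OFAModel.from_pretrained(ckpt_dir, use_cache=False, torch_dtype=torch.float16)
```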
config.json
ADDED

{
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "add_type_embedding": true,
  "architectures": [
    "OFAModel"
  ],
  "attention_dropout": 0.0,
  "attn_scale_factor": 2.0,
  "bos_token_id": 0,
  "classifier_dropout": 0.0,
  "code_image_size": 128,
  "code_layernorm_embedding": true,
  "d_model": 1280,
  "decoder_attention_heads": 16,
  "decoder_drop_path_rate": 0.0,
  "decoder_ffn_dim": 5120,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 12,
  "decoder_normalize_before": true,
  "decoder_start_token_id": 0,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_drop_path_rate": 0.0,
  "encoder_ffn_dim": 5120,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 24,
  "encoder_normalize_before": true,
  "entangle_position_embedding": false,
  "eos_token_id": 2,
  "forced_eos_token_id": 2,
  "image_bucket_size": 42,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "layernorm_embedding": true,
  "max_position_embeddings": 1024,
  "model_type": "ofa",
  "normformer": true,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "patch_layernorm_embedding": true,
  "resnet_drop_path_rate": 0.0,
  "resnet_model_path": null,
  "resnet_type": "resnet152",
  "scale_embedding": false,
  "share_decoder_input_output_embed": true,
  "token_bucket_size": 256,
  "torch_dtype": "float16",
  "transformers_version": "4.15.0",
  "use_cache": false,
  "vocab_size": 59457
}
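
If you want to sanity-check these values after downloading, plain `json` is enough (the path below assumes the `git clone` from the README above):

```python
import json

# Path assumes the repository was cloned as shown in the README above.
with open("OFA-huge/config.json") as f:
    config = json.load(f)

# A few headline numbers for the huge variant:
print(config["d_model"])         # 1280
print(config["encoder_layers"])  # 24
print(config["decoder_layers"])  # 12
print(config["torch_dtype"])     # "float16"
```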
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
pytorch_model.bin
ADDED

version https://git-lfs.github.com/spec/v1
oid sha256:11f73f6ed0cdadad2b616e3014fd91b80064275b5c5f189b5ebe2eee2ce70681
size 2230901681
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.