Dataset schema (column, type, observed range):

| Column | Type | Range / cardinality |
|---|---|---|
| sha | null | n/a |
| last_modified | null | n/a |
| library_name | stringclasses | 154 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 348k |
| pipeline_tag | stringclasses | 45 values |
| id | stringlengths | 5 to 122 |
| tags | listlengths | 1 to 1.84k |
| created_at | stringlengths | 25 (fixed) |
| arxiv | listlengths | 0 to 201 |
| languages | listlengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | listlengths | 0 to 722 |
| processed_texts | listlengths | 1 to 723 |
| tokens_length | listlengths | 1 to 723 |
| input_texts | listlengths | 1 to 61 |
| embeddings | listlengths | 768 (fixed) |

Each record below is delimited by `---`. Its first three fields (sha, last_modified, library_name) are labeled inline; the remaining unlabeled lines follow the column order above: text, metadata, pipeline_tag, id, tags, created_at, arxiv, languages, tags_str, text_str, text_lists, processed_texts, tokens_length, input_texts, embeddings. Long field values are truncated with "..." in this dump.

---
sha: null
last_modified: null
library_name: transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> Made by finetuning [google/flan-t5-small](https://huggingface.co/google/flan-t5-small).
{"license": "unknown", "metrics": ["bleu"], "pipeline_tag": "translation"}
translation
aboli-marathe/flan_t5_3185BLEU
[ "transformers", "safetensors", "t5", "text2text-generation", "translation", "license:unknown", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
2023-11-12T17:02:03+00:00
[]
[]
TAGS #transformers #safetensors #t5 #text2text-generation #translation #license-unknown #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID Made by finetuning google/flan-t5-small.
[ "# Model Card for Model ID\n\n\n\nMade by finetuning google/flan-t5-small." ]
[ "TAGS\n#transformers #safetensors #t5 #text2text-generation #translation #license-unknown #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID\n\n\n\nMade by finetuning google/flan-t5-small." ]
[ 59, 20 ]
[ "passage: TAGS\n#transformers #safetensors #t5 #text2text-generation #translation #license-unknown #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n# Model Card for Model ID\n\n\n\nMade by finetuning google/flan-t5-small." ]
[ -0.011941848322749138, -0.022965282201766968, -0.0022553654853254557, 0.07177906483411789, 0.13740822672843933, 0.027347607538104057, 0.22754520177841187, 0.0798010379076004, 0.05369769036769867, -0.0540769025683403, 0.14481115341186523, 0.12744782865047455, 0.03279551491141319, 0.24586753...
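The record above describes a translation model (`aboli-marathe/flan_t5_3185BLEU`, finetuned from google/flan-t5-small). A hedged usage sketch via the `text2text-generation` task listed in its tags; the FLAN-style prompt is an assumption, since the card does not state the language pair:

```python
# Sketch: query the record's model; the prompt format is an assumption,
# as the card does not document the training language pair.
from transformers import pipeline

generator = pipeline("text2text-generation", model="aboli-marathe/flan_t5_3185BLEU")
print(generator("translate English to German: Hello, how are you?")[0]["generated_text"])
```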

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](ht...
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "microsoft/swin-tiny-patch4-window7-224", "model-index": [{"name": "swin-tiny-patch4-window7-224-finetuned-eurosat", "results": []}]}
image-classification
Artemiy27/swin-tiny-patch4-window7-224-finetuned-eurosat
[ "transformers", "tensorboard", "safetensors", "swin", "image-classification", "generated_from_trainer", "base_model:microsoft/swin-tiny-patch4-window7-224", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:02:41+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #swin #image-classification #generated_from_trainer #base_model-microsoft/swin-tiny-patch4-window7-224 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
swin-tiny-patch4-window7-224-finetuned-eurosat ============================================== This model is a fine-tuned version of microsoft/swin-tiny-patch4-window7-224 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.0136 * Accuracy: 0.9938 Model description -----------...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 32\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilo...
[ "TAGS\n#transformers #tensorboard #safetensors #swin #image-classification #generated_from_trainer #base_model-microsoft/swin-tiny-patch4-window7-224 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during tr...
[ 77, 144, 4, 33 ]
[ "passage: TAGS\n#transformers #tensorboard #safetensors #swin #image-classification #generated_from_trainer #base_model-microsoft/swin-tiny-patch4-window7-224 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during...
[ -0.142108753323555, 0.13684308528900146, -0.0017721746116876602, 0.08609165996313095, 0.13537132740020752, 0.015442646108567715, 0.1117485836148262, 0.1371457874774933, -0.09346076101064682, 0.10155948996543884, 0.12644082307815552, 0.10468471795320511, 0.059301506727933884, 0.156470626592...
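For the Swin classifier in the record above (`Artemiy27/swin-tiny-patch4-window7-224-finetuned-eurosat`, 0.9938 reported accuracy), a hedged inference sketch with the standard `transformers` pipeline; the image path is a placeholder:

```python
# Sketch: classify one image with the record's fine-tuned Swin checkpoint.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Artemiy27/swin-tiny-patch4-window7-224-finetuned-eurosat",
)
print(classifier("satellite_tile.jpg"))  # hypothetical local image file
```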

---
sha: null
last_modified: null
library_name: transformers
All Rights Reserved
{}
text-generation
f0rGoTTen000/AgroGPT_125M
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:11:20+00:00
[]
[]
TAGS #transformers #pytorch #gpt_neo #text-generation #autotrain_compatible #endpoints_compatible #region-us
All Rights Reserved
[]
[ "TAGS\n#transformers #pytorch #gpt_neo #text-generation #autotrain_compatible #endpoints_compatible #region-us \n" ]
[ 39 ]
[ "passage: TAGS\n#transformers #pytorch #gpt_neo #text-generation #autotrain_compatible #endpoints_compatible #region-us \n" ]
[ -0.031129082664847374, 0.010089954361319542, -0.005786326713860035, 0.002382909180596471, 0.17449840903282166, 0.03556443750858307, 0.05251007154583931, 0.13062667846679688, -0.03914913907647133, -0.02130643092095852, 0.14617420732975006, 0.1955074667930603, -0.02011914923787117, 0.1474472...

---
sha: null
last_modified: null
library_name: diffusers
### My-Pet-rabbit Dreambooth model trained by MANASA-1919 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: PIETW-212 Sample pictures of this concept: ![0](https://huggingface.co/MANASA-1919/my-pet-rabbit/resolve/main/sample_images/enq-1998-pxu2yxCTbTU-unsplash_(1).jpg) ...
{"license": "creativeml-openrail-m", "tags": ["NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion"]}
text-to-image
MANASA-1919/my-pet-rabbit
[ "diffusers", "safetensors", "NxtWave-GenAI-Webinar", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "endpoints_compatible", "has_space", "diffusers:StableDiffusionPipeline", "region:us" ]
2023-11-12T17:23:49+00:00
[]
[]
TAGS #diffusers #safetensors #NxtWave-GenAI-Webinar #text-to-image #stable-diffusion #license-creativeml-openrail-m #endpoints_compatible #has_space #diffusers-StableDiffusionPipeline #region-us
### My-Pet-rabbit Dreambooth model trained by MANASA-1919 following the "Build your own Gen AI model" session by NxtWave. Project Submission Code: PIETW-212 Sample pictures of this concept: !0.jpg)
[ "### My-Pet-rabbit Dreambooth model trained by MANASA-1919 following the \"Build your own Gen AI model\" session by NxtWave.\n\nProject Submission Code: PIETW-212\n\nSample pictures of this concept:\n\n !0.jpg)" ]
[ "TAGS\n#diffusers #safetensors #NxtWave-GenAI-Webinar #text-to-image #stable-diffusion #license-creativeml-openrail-m #endpoints_compatible #has_space #diffusers-StableDiffusionPipeline #region-us \n", "### My-Pet-rabbit Dreambooth model trained by MANASA-1919 following the \"Build your own Gen AI model\" session...
[ 77, 59 ]
[ "passage: TAGS\n#diffusers #safetensors #NxtWave-GenAI-Webinar #text-to-image #stable-diffusion #license-creativeml-openrail-m #endpoints_compatible #has_space #diffusers-StableDiffusionPipeline #region-us \n### My-Pet-rabbit Dreambooth model trained by MANASA-1919 following the \"Build your own Gen AI model\" sess...
[ -0.09466423839330673, 0.13253657519817352, -0.0013108402490615845, 0.004656786564737558, 0.06036553904414177, -0.014421354047954082, 0.14546090364456177, 0.009823665954172611, -0.05602928251028061, 0.03862942010164261, 0.1504296213388443, 0.036816202104091644, 0.021976718679070473, 0.17629...

---
sha: null
last_modified: null
library_name: transformers
<div align="center"> <h1> TransNormerLLM -- A Faster and Better LLM </h1> </div> <p align="center"> 💻 <a href="https://github.com/OpenNLPLab/TransnormerLLM" target="_blank">GitHub </a> • 💬 <a href="https://discord.gg/W4Vr7AKW" target="_blank">Discord</a> • 💬 <a href="./images/contact_me_qr.png" target="_blank">W...
{"language": ["en", "zh"], "license": "other", "tags": [" TransNormerLLM"], "pipeline_tag": "text-generation"}
text-generation
OpenNLPLab/TransNormerLLM-7B
[ "transformers", "pytorch", "text-generation", " TransNormerLLM", "custom_code", "en", "zh", "arxiv:2307.14995", "arxiv:2009.03300", "license:other", "autotrain_compatible", "region:us" ]
2023-11-12T17:25:50+00:00
[ "2307.14995", "2009.03300" ]
[ "en", "zh" ]
TAGS #transformers #pytorch #text-generation # TransNormerLLM #custom_code #en #zh #arxiv-2307.14995 #arxiv-2009.03300 #license-other #autotrain_compatible #region-us
TransNormerLLM -- A Faster and Better LLM =========================================== [GitHub](URL target=) • [Discord](URL target=) • [Wechat](./images/contact_me_qr.png) Table of Contents ================= * Introduction * Released Weights * Benchmark Results + General Domain - Model Results * Inferen...
[ "### Model Results\n\n\nPerformance Comparison on Commonsense Reasoning and Aggregated Benchmarks. For a fair comparison, we report competing methods' results reproduced by us using their released models. PS: parameter size (billion). T: tokens (trillion). HS: HellaSwag. WG: WinoGrande.\n\n\n\nInference and Deploym...
[ "TAGS\n#transformers #pytorch #text-generation # TransNormerLLM #custom_code #en #zh #arxiv-2307.14995 #arxiv-2009.03300 #license-other #autotrain_compatible #region-us \n", "### Model Results\n\n\nPerformance Comparison on Commonsense Reasoning and Aggregated Benchmarks. For a fair comparison, we report competin...
[ 62, 207, 857 ]
[ "passage: TAGS\n#transformers #pytorch #text-generation # TransNormerLLM #custom_code #en #zh #arxiv-2307.14995 #arxiv-2009.03300 #license-other #autotrain_compatible #region-us \n### Model Results\n\n\nPerformance Comparison on Commonsense Reasoning and Aggregated Benchmarks. For a fair comparison, we report compe...
[ -0.08346836268901825, 0.02420392446219921, 0.001026795944198966, 0.07055620104074478, 0.13346947729587555, 0.033620063215494156, 0.02029181271791458, 0.06454194337129593, 0.030359001830220222, 0.03821771219372749, 0.01619586907327175, 0.03526497259736061, 0.11383260786533356, 0.04776724427...
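The record above carries the `custom_code` tag, which for `transformers` means the checkpoint ships its own modeling code. A hedged loading sketch; `trust_remote_code=True` is the usual requirement for such repos, and the prompt and generation settings are assumptions:

```python
# Sketch: custom-code checkpoints require trust_remote_code=True to load.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "OpenNLPLab/TransNormerLLM-7B"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("TransNormerLLM is", return_tensors="pt")  # assumed prompt
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```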

---
sha: null
last_modified: null
library_name: null
# Lora of haruka_makino_onichichi This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion). And the auto-training framework is maintained by [DeepGHS Team](https://huggingface.co/deepghs). The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and th...
{"license": "mit", "tags": ["art"], "datasets": ["CyberHarem/haruka_makino_onichichi"], "pipeline_tag": "text-to-image"}
text-to-image
CyberHarem/haruka_makino_onichichi
[ "art", "text-to-image", "dataset:CyberHarem/haruka_makino_onichichi", "license:mit", "region:us" ]
2023-11-12T17:26:47+00:00
[]
[]
TAGS #art #text-to-image #dataset-CyberHarem/haruka_makino_onichichi #license-mit #region-us
Lora of haruka\_makino\_onichichi ================================= This model is trained with HCP-Diffusion. And the auto-training framework is maintained by DeepGHS Team. The base model used during training is NAI, and the base model used for generating preview images is Meina/MeinaMix\_V11. After downloading t...
[]
[ "TAGS\n#art #text-to-image #dataset-CyberHarem/haruka_makino_onichichi #license-mit #region-us \n" ]
[ 38 ]
[ "passage: TAGS\n#art #text-to-image #dataset-CyberHarem/haruka_makino_onichichi #license-mit #region-us \n" ]
[ 0.003054808359593153, 0.08358627557754517, -0.004329655319452286, 0.10880003869533539, 0.11663851141929626, 0.0768394023180008, 0.2980847656726837, 0.09001840651035309, 0.08185688406229019, -0.016139397397637367, 0.1508210450410843, 0.06848318874835968, 0.042822062969207764, 0.031779795885...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-xsum_readme_summarization This model is a fine-tuned version of [google/pegasus-xsum](https://huggingface.co/google/pega...
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-xsum", "model-index": [{"name": "pegasus-xsum_readme_summarization", "results": []}]}
text2text-generation
bunbohue/pegasus-xsum_readme_summarization
[ "transformers", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-xsum", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:33:52+00:00
[]
[]
TAGS #transformers #safetensors #pegasus #text2text-generation #generated_from_trainer #base_model-google/pegasus-xsum #autotrain_compatible #endpoints_compatible #region-us
pegasus-xsum\_readme\_summarization =================================== This model is a fine-tuned version of google/pegasus-xsum on the None dataset. It achieves the following results on the evaluation set: * Loss: 2.3151 * Rouge1: 0.4555 * Rouge2: 0.313 * Rougel: 0.43 * Rougelsum: 0.4306 * Gen Len: 20.4628 Mode...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 4", "### Training...
[ "TAGS\n#transformers #safetensors #pegasus #text2text-generation #generated_from_trainer #base_model-google/pegasus-xsum #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_...
[ 61, 98, 4, 33 ]
[ "passage: TAGS\n#transformers #safetensors #pegasus #text2text-generation #generated_from_trainer #base_model-google/pegasus-xsum #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train...
[ -0.10172242671251297, 0.04882853478193283, -0.0015264730900526047, 0.11111883074045181, 0.17520025372505188, 0.013508275151252747, 0.15961115062236786, 0.09683938324451447, -0.08661746233701706, 0.04224911704659462, 0.13885879516601562, 0.11442262679338455, -0.003248627530410886, 0.1559090...

---
sha: null
last_modified: null
library_name: peft
# Model Card for CNC-7b ## Model Details - Name: CNC-7b - Version: 1.0 - Release Date: November 13, 2023 ## Intended Use CNC-7b is a lora adapter for Mistral-7b (Instruct) intended to be clear, concise, and helpful in short text conversations. It is designed for conversational agents and assistants. ## Training Da...
{"language": ["en", "tl"], "license": "cc-by-sa-4.0", "library_name": "peft", "tags": ["mistral", "lora", "instruct", "custom code"], "datasets": ["NewstaR/clearNconcise"], "pipeline_tag": "text-generation", "inference": false, "base_model": "mistralai/Mistral-7B-v0.1"}
text-generation
NewstaR/CNC-7b-lora
[ "peft", "mistral", "lora", "instruct", "custom code", "text-generation", "en", "tl", "dataset:NewstaR/clearNconcise", "base_model:mistralai/Mistral-7B-v0.1", "license:cc-by-sa-4.0", "region:us" ]
2023-11-12T17:35:38+00:00
[]
[ "en", "tl" ]
TAGS #peft #mistral #lora #instruct #custom code #text-generation #en #tl #dataset-NewstaR/clearNconcise #base_model-mistralai/Mistral-7B-v0.1 #license-cc-by-sa-4.0 #region-us
# Model Card for CNC-7b ## Model Details - Name: CNC-7b - Version: 1.0 - Release Date: November 13, 2023 ## Intended Use CNC-7b is a lora adapter for Mistral-7b (Instruct) intended to be clear, concise, and helpful in short text conversations. It is designed for conversational agents and assistants. ## Training Da...
[ "# Model Card for CNC-7b", "## Model Details\n\n- Name: CNC-7b\n- Version: 1.0\n- Release Date: November 13, 2023", "## Intended Use\n\nCNC-7b is a lora adapter for Mistral-7b (Instruct) intended to be clear, concise, and helpful in short text conversations. It is designed for conversational agents and assistan...
[ "TAGS\n#peft #mistral #lora #instruct #custom code #text-generation #en #tl #dataset-NewstaR/clearNconcise #base_model-mistralai/Mistral-7B-v0.1 #license-cc-by-sa-4.0 #region-us \n", "# Model Card for CNC-7b", "## Model Details\n\n- Name: CNC-7b\n- Version: 1.0\n- Release Date: November 13, 2023", "## Intende...
[ 72, 7, 20, 50, 47, 37, 77, 81 ]
[ "passage: TAGS\n#peft #mistral #lora #instruct #custom code #text-generation #en #tl #dataset-NewstaR/clearNconcise #base_model-mistralai/Mistral-7B-v0.1 #license-cc-by-sa-4.0 #region-us \n# Model Card for CNC-7b## Model Details\n\n- Name: CNC-7b\n- Version: 1.0\n- Release Date: November 13, 2023## Intended Use\n\n...
[ -0.03802156820893288, 0.10403156280517578, -0.0038227131590247154, 0.04984186589717865, 0.06264986097812653, -0.020623620599508286, 0.06629223376512527, 0.0455867201089859, 0.025111690163612366, 0.106367327272892, 0.014057626016438007, 0.010684250853955746, 0.022875262424349785, -0.0014400...
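The record above is a LoRA adapter (`NewstaR/CNC-7b-lora`) whose metadata names `mistralai/Mistral-7B-v0.1` as the base model. A hedged sketch of attaching the adapter with `peft`; device and dtype handling are omitted:

```python
# Sketch: attach the record's LoRA adapter to its stated base model.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
model = PeftModel.from_pretrained(base, "NewstaR/CNC-7b-lora")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
```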

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # FakeNews-bert-large-cased-stable This model is a fine-tuned version of [bert-large-cased](https://huggingface.co/bert-large-case...
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "bert-large-cased", "model-index": [{"name": "FakeNews-bert-large-cased-stable", "results": []}]}
text-classification
Denyol/FakeNews-bert-large-cased-stable
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:bert-large-cased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:39:25+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-large-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
FakeNews-bert-large-cased-stable ================================ This model is a fine-tuned version of bert-large-cased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1020 * Accuracy: 0.9827 Model description ----------------- More information needed Intended uses &...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 3e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e...
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-large-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning...
[ 68, 144, 4, 33 ]
[ "passage: TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-bert-large-cased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learn...
[ -0.13622143864631653, 0.10832702368497849, -0.0013251675991341472, 0.08092275261878967, 0.14709462225437164, 0.016844678670167923, 0.12600374221801758, 0.12634369730949402, -0.12376739829778671, 0.0713043212890625, 0.10893415659666061, 0.07735035568475723, 0.029939601197838783, 0.134975954...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt-neo-125M-couples_therapist_full_renamed This model is a fine-tuned version of [EleutherAI/gpt-neo-125M](https://huggingface....
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/gpt-neo-125M", "model-index": [{"name": "gpt-neo-125M-couples_therapist_full_renamed", "results": []}]}
text-generation
ColleenMacklin/gpt-neo-125M-couples_therapist_full_renamed
[ "transformers", "tensorboard", "safetensors", "gpt_neo", "text-generation", "generated_from_trainer", "base_model:EleutherAI/gpt-neo-125M", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:41:44+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us
gpt-neo-125M-couples\_therapist\_full\_renamed ============================================== This model is a fine-tuned version of EleutherAI/gpt-neo-125M on the None dataset. It achieves the following results on the evaluation set: * Loss: 3.0235 Model description ----------------- More information needed I...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3.0", "### Traini...
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\...
[ 73, 98, 4, 33 ]
[ "passage: TAGS\n#transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learni...
[ -0.09340846538543701, 0.06741952151060104, -0.0018559551099315286, 0.10453764349222183, 0.13651709258556366, 0.016949983313679695, 0.1594475656747818, 0.11260412633419037, -0.07910064607858658, 0.060557398945093155, 0.13735359907150269, 0.11269506812095642, 0.018921131268143654, 0.13898272...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ICU_Returns_ClinicalBERT This model is a fine-tuned version of [medicalai/ClinicalBERT](https://huggingface.co/medicalai/Clinica...
{"tags": ["generated_from_trainer"], "base_model": "medicalai/ClinicalBERT", "model-index": [{"name": "ICU_Returns_ClinicalBERT", "results": []}]}
text-classification
moro01525/ICU_Returns_ClinicalBERT
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "base_model:medicalai/ClinicalBERT", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:43:41+00:00
[]
[]
TAGS #transformers #pytorch #distilbert #text-classification #generated_from_trainer #base_model-medicalai/ClinicalBERT #autotrain_compatible #endpoints_compatible #region-us
ICU\_Returns\_ClinicalBERT ========================== This model is a fine-tuned version of medicalai/ClinicalBERT on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.3201 * F1:: 0.7134 * Roc Auc: 0.7225 * Precision with 0:: 0.8462 * Precision with 1:: 0.6640 * Recall with 0:: ...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 13", "### Trai...
[ "TAGS\n#transformers #pytorch #distilbert #text-classification #generated_from_trainer #base_model-medicalai/ClinicalBERT #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\...
[ 58, 97, 4, 33 ]
[ "passage: TAGS\n#transformers #pytorch #distilbert #text-classification #generated_from_trainer #base_model-medicalai/ClinicalBERT #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* tra...
[ -0.1032809242606163, 0.05841078236699104, -0.0018423302099108696, 0.1014985516667366, 0.18858270347118378, 0.034419309347867966, 0.11507382243871689, 0.12837035953998566, -0.10963667929172516, 0.031722672283649445, 0.13512365520000458, 0.15409676730632782, -0.004192421678453684, 0.11714092...

---
sha: null
last_modified: null
library_name: stable-baselines3
# **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 ...
{"library_name": "stable-baselines3", "tags": ["LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "PPO", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "LunarLander-v2", "type": "LunarL...
reinforcement-learning
vones/ppo-LunarLander-v2
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
2023-11-12T17:44:15+00:00
[]
[]
TAGS #stable-baselines3 #LunarLander-v2 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# PPO Agent playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2 using the stable-baselines3 library. ## Usage (with Stable-baselines3) TODO: Add your code
[ "# PPO Agent playing LunarLander-v2\nThis is a trained model of a PPO agent playing LunarLander-v2\nusing the stable-baselines3 library.", "## Usage (with Stable-baselines3)\nTODO: Add your code" ]
[ "TAGS\n#stable-baselines3 #LunarLander-v2 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# PPO Agent playing LunarLander-v2\nThis is a trained model of a PPO agent playing LunarLander-v2\nusing the stable-baselines3 library.", "## Usage (with Stable-baselines3)\nTODO: Add you...
[ 39, 41, 17 ]
[ "passage: TAGS\n#stable-baselines3 #LunarLander-v2 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n# PPO Agent playing LunarLander-v2\nThis is a trained model of a PPO agent playing LunarLander-v2\nusing the stable-baselines3 library.## Usage (with Stable-baselines3)\nTODO: Add your c...
[ 0.03942384943366051, 0.04900386184453964, -0.005304091144353151, 0.026427261531352997, 0.107408307492733, -0.026511888951063156, 0.11188238859176636, 0.0814051404595375, 0.10722193866968155, 0.04762078449130058, 0.08338645845651627, 0.06030960753560066, 0.05080918222665787, 0.2571701407432...
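The card in the record above ends its usage block at "TODO: Add your code", naming only its two imports. One hedged way to complete it; the checkpoint filename inside the repo is an assumption:

```python
# Sketch completing the card's truncated usage block.
# The .zip filename inside the repo is an assumption, not stated by the card.
import gymnasium as gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

checkpoint = load_from_hub("vones/ppo-LunarLander-v2", "ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
obs, _ = env.reset()
action, _ = model.predict(obs, deterministic=True)
```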

---
sha: null
last_modified: null
library_name: peft
## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_doub...
{"library_name": "peft"}
null
runse/OPS-koalpaca-polyglot-12.8b
[ "peft", "tensorboard", "region:us" ]
2023-11-12T17:49:35+00:00
[]
[]
TAGS #peft #tensorboard #region-us
## Training procedure The following 'bitsandbytes' quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_doub...
[ "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- b...
[ "TAGS\n#peft #tensorboard #region-us \n", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_we...
[ 13, 154, 11 ]
[ "passage: TAGS\n#peft #tensorboard #region-us \n## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16...
[ -0.08857559412717819, 0.05881764739751816, -0.0026079299859702587, 0.13755552470684052, 0.08889607340097427, 0.059008270502090454, 0.13547134399414062, 0.1311831772327423, 0.044567741453647614, 0.0979868620634079, 0.08869192004203796, 0.04804614931344986, 0.07064280658960342, 0.11643702536...
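The quantization flags listed in the record above map directly onto `transformers.BitsAndBytesConfig`. A hedged reconstruction; values cut off in the dump (the double-quant flag and the compute dtype) are assumptions:

```python
# Sketch reconstructing the record's bitsandbytes quantization config.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,         # assumed: line truncated in the dump
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumed: not shown in the dump
)
```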

---
sha: null
last_modified: null
library_name: null
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fine-tuned-text-summarization This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-...
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "google/flan-t5-base", "model-index": [{"name": "fine-tuned-text-summarization", "results": []}]}
null
AlyGreo/fine-tuned-text-summarization
[ "tensorboard", "safetensors", "generated_from_trainer", "base_model:google/flan-t5-base", "license:apache-2.0", "region:us" ]
2023-11-12T17:50:40+00:00
[]
[]
TAGS #tensorboard #safetensors #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #region-us
# fine-tuned-text-summarization This model is a fine-tuned version of google/flan-t5-base on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hype...
[ "# fine-tuned-text-summarization\n\nThis model is a fine-tuned version of google/flan-t5-base on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training pr...
[ "TAGS\n#tensorboard #safetensors #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #region-us \n", "# fine-tuned-text-summarization\n\nThis model is a fine-tuned version of google/flan-t5-base on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended us...
[ 42, 36, 6, 12, 8, 3, 90, 4, 33 ]
[ "passage: TAGS\n#tensorboard #safetensors #generated_from_trainer #base_model-google/flan-t5-base #license-apache-2.0 #region-us \n# fine-tuned-text-summarization\n\nThis model is a fine-tuned version of google/flan-t5-base on an unknown dataset.## Model description\n\nMore information needed## Intended uses & limi...
[ -0.1221853494644165, 0.1370992362499237, -0.0010247378377243876, 0.10777068883180618, 0.14110660552978516, 0.006529451813548803, 0.10958582907915115, 0.09449248760938644, -0.08342529833316803, 0.09244300425052643, 0.09333959966897964, -0.001905346056446433, 0.04241875559091568, 0.212567627...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ICU_Returns_BioClinicalBERT This model is a fine-tuned version of [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emily...
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "emilyalsentzer/Bio_ClinicalBERT", "model-index": [{"name": "ICU_Returns_BioClinicalBERT", "results": []}]}
text-classification
moro01525/ICU_Returns_BioClinicalBERT
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "base_model:emilyalsentzer/Bio_ClinicalBERT", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T17:53:50+00:00
[]
[]
TAGS #transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-emilyalsentzer/Bio_ClinicalBERT #license-mit #autotrain_compatible #endpoints_compatible #region-us
ICU\_Returns\_BioClinicalBERT ============================= This model is a fine-tuned version of emilyalsentzer/Bio\_ClinicalBERT on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.7775 * F1:: 0.7063 * Roc Auc: 0.7198 * Precision with 0:: 0.8846 * Precision with 1:: 0.6538 * ...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 13", "### Trai...
[ "TAGS\n#transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-emilyalsentzer/Bio_ClinicalBERT #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: ...
[ 66, 97, 4, 33 ]
[ "passage: TAGS\n#transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-emilyalsentzer/Bio_ClinicalBERT #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rat...
[ -0.09056839346885681, 0.08475185930728912, -0.0015843362780287862, 0.10697293281555176, 0.16269953548908234, 0.03651510924100876, 0.1327241212129593, 0.1209334135055542, -0.0702114850282669, 0.029579630121588707, 0.12896092236042023, 0.13817715644836426, -0.0012983549386262894, 0.125189244...

---
sha: null
last_modified: null
library_name: transformers
ExllamaV2 version of the model created by BlueNipples! Original Model https://huggingface.co/BlueNipples/TimeCrystal-l2-13B Requires ExllamaV2, which is being developed by turboderp https://github.com/turboderp/exllamav2 under an MIT license. Main branch is 8bpw 8h ---- This 13B model, TimeCrystal-l2-13B is bui...
{"license": "apache-2.0", "tags": ["llama-2", "roleplaying"]}
text-generation
AzureBlack/TimeCrystal-l2-13B-exl2
[ "transformers", "safetensors", "llama", "text-generation", "llama-2", "roleplaying", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
2023-11-12T17:57:04+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #llama-2 #roleplaying #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
ExllamaV2 version of the model created by BlueNipples! Original Model URL Requires ExllamaV2, which is being developed by turboderp URL under an MIT license. Main branch is 8bpw 8h ---- This 13B model, TimeCrystal-l2-13B is built to maximize logic and instruct following, whilst also increasing the vividness of ...
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #llama-2 #roleplaying #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
[ 64 ]
[ "passage: TAGS\n#transformers #safetensors #llama #text-generation #llama-2 #roleplaying #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
[ -0.0013048341497778893, 0.02985749952495098, -0.006491969805210829, 0.02622772380709648, 0.0847533792257309, -0.03205602988600731, 0.18770553171634674, 0.11261523514986038, -0.03938153386116028, -0.029700199142098427, 0.14478813111782074, 0.1644625961780548, -0.011986946687102318, 0.063405...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt-neo-125M-couples_therapist_full_renamed This model is a fine-tuned version of [EleutherAI/gpt-neo-125M](https://huggingface....
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "EleutherAI/gpt-neo-125M", "model-index": [{"name": "gpt-neo-125M-couples_therapist_full_renamed", "results": []}]}
text-generation
ailments/gpt-neo-125M-couples_therapist_full_renamed
[ "transformers", "tensorboard", "safetensors", "gpt_neo", "text-generation", "generated_from_trainer", "base_model:EleutherAI/gpt-neo-125M", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T18:00:45+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us
gpt-neo-125M-couples\_therapist\_full\_renamed ============================================== This model is a fine-tuned version of EleutherAI/gpt-neo-125M on the None dataset. It achieves the following results on the evaluation set: * Loss: 3.0778 Model description ----------------- More information needed I...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3.0", "### Traini...
[ "TAGS\n#transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\...
[ 73, 98, 4, 33 ]
[ "passage: TAGS\n#transformers #tensorboard #safetensors #gpt_neo #text-generation #generated_from_trainer #base_model-EleutherAI/gpt-neo-125M #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learni...
[ -0.09340846538543701, 0.06741952151060104, -0.0018559551099315286, 0.10453764349222183, 0.13651709258556366, 0.016949983313679695, 0.1594475656747818, 0.11260412633419037, -0.07910064607858658, 0.060557398945093155, 0.13735359907150269, 0.11269506812095642, 0.018921131268143654, 0.13898272...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ICU_Returns_COReClinicalBioBERT This model is a fine-tuned version of [bvanaken/CORe-clinical-outcome-biobert-v1](https://huggin...
{"tags": ["generated_from_trainer"], "base_model": "bvanaken/CORe-clinical-outcome-biobert-v1", "model-index": [{"name": "ICU_Returns_COReClinicalBioBERT", "results": []}]}
text-classification
moro01525/ICU_Returns_COReClinicalBioBERT
[ "transformers", "pytorch", "bert", "text-classification", "generated_from_trainer", "base_model:bvanaken/CORe-clinical-outcome-biobert-v1", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T18:04:18+00:00
[]
[]
TAGS #transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-bvanaken/CORe-clinical-outcome-biobert-v1 #autotrain_compatible #endpoints_compatible #region-us
ICU\_Returns\_COReClinicalBioBERT ================================= This model is a fine-tuned version of bvanaken/CORe-clinical-outcome-biobert-v1 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.8391 * F1:: 0.7210 * Roc Auc: 0.7335 * Precision with 0:: 0.9048 * Precision w...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 13", "### Trai...
[ "TAGS\n#transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-bvanaken/CORe-clinical-outcome-biobert-v1 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0...
[ 66, 97, 4, 33 ]
[ "passage: TAGS\n#transformers #pytorch #bert #text-classification #generated_from_trainer #base_model-bvanaken/CORe-clinical-outcome-biobert-v1 #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: ...
[ -0.0993773490190506, 0.0661829262971878, -0.00272336695343256, 0.100232794880867, 0.16182821989059448, 0.04326564818620682, 0.10973936319351196, 0.12175281345844269, -0.09269033372402191, 0.03996890038251877, 0.13132980465888977, 0.1315375566482544, -0.0022924623917788267, 0.13698230683803...

---
sha: null
last_modified: null
library_name: peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** ...
{"library_name": "peft", "base_model": "meta-llama/Llama-2-7b-hf"}
null
joshswartz/model_d2_llama_wikihow_cc
[ "peft", "arxiv:1910.09700", "base_model:meta-llama/Llama-2-7b-hf", "region:us" ]
2023-11-12T18:05:53+00:00
[ "1910.09700" ]
[]
TAGS #peft #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: #...
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [option...
[ "TAGS\n#peft #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from mode...
[ 36, 6, 3, 54, 28, 3, 4, 9, 9, 10, 42, 20, 3, 4, 5, 9, 11, 13, 3, 12, 5, 4, 5, 3, 4, 9, 53, 9, 8, 6, 3, 14, 8, 7, 9, 4, 163, 11 ]
[ "passage: TAGS\n#peft #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [option...
[ -0.10044248402118683, 0.18992742896080017, -0.0031633442267775536, 0.032848432660102844, 0.0898432508111, 0.020555412396788597, 0.0514112152159214, 0.1319137066602707, -0.028625067323446274, 0.10301047563552856, 0.06944341957569122, 0.10447767376899719, 0.10382714867591858, 0.1985284984111...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # segformer-finetuned-coasts-final This model is a fine-tuned version of [peldrak/segformer-finetuned-coastalDataset](https://hugg...
{"license": "other", "tags": ["vision", "image-segmentation", "generated_from_trainer"], "base_model": "peldrak/segformer-finetuned-coastalDataset", "model-index": [{"name": "segformer-finetuned-coasts-final", "results": []}]}
image-segmentation
peldrak/segformer-finetuned-coasts-final
[ "transformers", "pytorch", "segformer", "vision", "image-segmentation", "generated_from_trainer", "base_model:peldrak/segformer-finetuned-coastalDataset", "license:other", "endpoints_compatible", "region:us" ]
2023-11-12T18:23:08+00:00
[]
[]
TAGS #transformers #pytorch #segformer #vision #image-segmentation #generated_from_trainer #base_model-peldrak/segformer-finetuned-coastalDataset #license-other #endpoints_compatible #region-us
segformer-finetuned-coasts-final ================================ This model is a fine-tuned version of peldrak/segformer-finetuned-coastalDataset on the peldrak/coastal2 dataset. It achieves the following results on the evaluation set: * Loss: 0.2563 * Mean Iou: 0.5765 * Mean Accuracy: 0.7934 * Overall Accuracy: 0...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training...
[ "TAGS\n#transformers #pytorch #segformer #vision #image-segmentation #generated_from_trainer #base_model-peldrak/segformer-finetuned-coastalDataset #license-other #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rat...
[ 65, 98, 4, 30 ]
[ "passage: TAGS\n#transformers #pytorch #segformer #vision #image-segmentation #generated_from_trainer #base_model-peldrak/segformer-finetuned-coastalDataset #license-other #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_...
[ -0.10850391536951065, 0.035926368087530136, -0.0028501360211521387, 0.11110061407089233, 0.14993955194950104, 0.038158874958753586, 0.11395425349473953, 0.10601773113012314, -0.11149395257234573, 0.030759459361433983, 0.10913681983947754, 0.12291571497917175, 0.006016973406076431, 0.124734...

---
sha: null
last_modified: null
library_name: null
# Lora of akira_makino_onichichi This model is trained with [HCP-Diffusion](https://github.com/7eu7d7/HCP-Diffusion). And the auto-training framework is maintained by [DeepGHS Team](https://huggingface.co/deepghs). The base model used during training is [NAI](https://huggingface.co/deepghs/animefull-latest), and the...
{"license": "mit", "tags": ["art"], "datasets": ["CyberHarem/akira_makino_onichichi"], "pipeline_tag": "text-to-image"}
text-to-image
CyberHarem/akira_makino_onichichi
[ "art", "text-to-image", "dataset:CyberHarem/akira_makino_onichichi", "license:mit", "region:us" ]
2023-11-12T18:27:36+00:00
[]
[]
TAGS #art #text-to-image #dataset-CyberHarem/akira_makino_onichichi #license-mit #region-us
Lora of akira\_makino\_onichichi ================================ This model is trained with HCP-Diffusion. And the auto-training framework is maintained by DeepGHS Team. The base model used during training is NAI, and the base model used for generating preview images is Meina/MeinaMix\_V11. After downloading the...
[]
[ "TAGS\n#art #text-to-image #dataset-CyberHarem/akira_makino_onichichi #license-mit #region-us \n" ]
[ 38 ]
[ "passage: TAGS\n#art #text-to-image #dataset-CyberHarem/akira_makino_onichichi #license-mit #region-us \n" ]
[ 0.009592467918992043, 0.09018023312091827, -0.0039832983165979385, 0.11452822387218475, 0.126128688454628, 0.07561339437961578, 0.30467352271080017, 0.09213471412658691, 0.07940223813056946, -0.018436985090374947, 0.14162461459636688, 0.06716171652078629, 0.03528561070561409, 0.03555835038...

---
sha: null
last_modified: null
library_name: stable-baselines3
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framewor...
{"library_name": "stable-baselines3", "tags": ["SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "DQN", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "SpaceInvadersNoFram...
reinforcement-learning
VenomAI/DQN-SpaceInvadersNFS-v4
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
2023-11-12T18:30:13+00:00
[]
[]
TAGS #stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# DQN Agent playing SpaceInvadersNoFrameskip-v4 This is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4 using the stable-baselines3 library and the RL Zoo. The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents...
[ "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-tra...
[ "TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo...
[ 43, 90, 73, 9, 5, 7 ]
[ "passage: TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL ...
[ 0.043572068214416504, 0.2414778620004654, -0.0026879787910729647, 0.012635791674256325, 0.05784223601222038, 0.0030472534708678722, 0.08585051447153091, 0.10650663822889328, 0.024212315678596497, -0.001382096204906702, 0.003954293206334114, 0.17533031105995178, 0.03632635250687599, 0.13125...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # arabert-fully-supervised-arabic-propaganda This model is a fine-tuned version of [aubmindlab/bert-base-arabertv02-twitter](https...
{"tags": ["generated_from_trainer"], "metrics": ["accuracy", "precision", "recall", "f1"], "base_model": "aubmindlab/bert-base-arabertv02-twitter", "model-index": [{"name": "arabert-fully-supervised-arabic-propaganda", "results": []}]}
text-classification
Bmalmotairy/arabert-fully-supervised-arabic-propaganda
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:aubmindlab/bert-base-arabertv02-twitter", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T18:30:16+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-aubmindlab/bert-base-arabertv02-twitter #autotrain_compatible #endpoints_compatible #region-us
arabert-fully-supervised-arabic-propaganda ========================================== This model is a fine-tuned version of aubmindlab/bert-base-arabertv02-twitter on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.4417 * Accuracy: 0.9167 * Precision: 0.5577 * Recall: 0.7073 * F...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio...
[ "TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-aubmindlab/bert-base-arabertv02-twitter #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learn...
[ 68, 116, 4, 33 ]
[ "passage: TAGS\n#transformers #tensorboard #safetensors #bert #text-classification #generated_from_trainer #base_model-aubmindlab/bert-base-arabertv02-twitter #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* le...
[ -0.08724016696214676, 0.09985264390707016, -0.004035210236907005, 0.10187695920467377, 0.12054022401571274, 0.010918018408119678, 0.15931177139282227, 0.1451641321182251, -0.06599672883749008, 0.07374264299869537, 0.13771864771842957, 0.1183827817440033, 0.015180151909589767, 0.15344068408...

---
sha: null
last_modified: null
library_name: transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-large-survey-new_fact_main_passage-rater This model is a fine-tuned version of [microsoft/deberta-v3-large](https://h...
{"license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "deberta-v3-large-survey-new_fact_main_passage-rater", "results": []}]}
text-classification
domenicrosati/deberta-v3-large-survey-new_fact_main_passage-rater
[ "transformers", "pytorch", "tensorboard", "deberta-v2", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T18:37:29+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #deberta-v2 #text-classification #generated_from_trainer #license-mit #autotrain_compatible #endpoints_compatible #region-us
deberta-v3-large-survey-new\_fact\_main\_passage-rater ====================================================== This model is a fine-tuned version of microsoft/deberta-v3-large on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.2742 * Krippendorff: 0.9302 * Spearman: 0.9541 * Abso...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-06\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps...
[ "TAGS\n#transformers #pytorch #tensorboard #deberta-v2 #text-classification #generated_from_trainer #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-06\n* train\\_batch\\_...
[ 57, 131, 4, 32 ]
[ "passage: TAGS\n#transformers #pytorch #tensorboard #deberta-v2 #text-classification #generated_from_trainer #license-mit #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 6e-06\n* train\\_batch...
[ -0.09419464319944382, 0.0819736123085022, -0.00423989724367857, 0.08212623745203018, 0.1398732215166092, 0.008019800297915936, 0.12661798298358917, 0.14956742525100708, -0.10706411302089691, 0.04653304070234299, 0.12092546373605728, 0.17130465805530548, 0.031082550063729286, 0.158726081252...

---
sha: null
last_modified: null
library_name: peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Info...
{"library_name": "peft", "base_model": "Deci/DeciCoder-1b"}
null
CShorten/decicoder-50m-updated-schemaSplit-10k-steps
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:Deci/DeciCoder-1b", "region:us" ]
2023-11-12T18:37:32+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-Deci/DeciCoder-1b #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ...
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:"...
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-Deci/DeciCoder-1b #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "#...
[ 37, 6, 3, 45, 28, 3, 4, 9, 9, 10, 42, 20, 3, 4, 5, 9, 11, 13, 3, 12, 5, 4, 5, 3, 4, 9, 53, 9, 8, 6, 3, 14, 8, 7, 9, 4, 164, 14 ]
[ "passage: TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-Deci/DeciCoder-1b #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model Source...
[ -0.0890924334526062, 0.20476669073104858, -0.004046343732625246, 0.029457159340381622, 0.08862961083650589, 0.025224218145012856, 0.06072753295302391, 0.10459984838962555, -0.06071311607956886, 0.10535138100385666, 0.05466222018003464, 0.08295578509569168, 0.10219942033290863, 0.1922721117...
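The record above is a PEFT adapter trained on top of Deci/DeciCoder-1b. A minimal sketch of attaching the adapter to its recorded base model, assuming the adapter repo is public; DeciCoder ships a custom architecture, so `trust_remote_code=True` is likely required:

```python
# Minimal sketch: base model + PEFT adapter, then a short completion.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Deci/DeciCoder-1b", trust_remote_code=True)
model = PeftModel.from_pretrained(base, "CShorten/decicoder-50m-updated-schemaSplit-10k-steps")
tokenizer = AutoTokenizer.from_pretrained("Deci/DeciCoder-1b")

inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```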
null
null
transformers
# Model Card ## Overview This document provides details about the training process and performance metrics for a machine learning model. The model is designed for a specific task, and the following table summarizes its performance at different training steps. ## Performance Metrics | Step | Training Loss | Validati...
{}
token-classification
DataIntelligenceTeam/Tansport1.4
[ "transformers", "pytorch", "layoutlmv3", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T18:42:03+00:00
[]
[]
TAGS #transformers #pytorch #layoutlmv3 #token-classification #autotrain_compatible #endpoints_compatible #region-us
Model Card ========== Overview -------- This document provides details about the training process and performance metrics for a machine learning model. The model is designed for a specific task, and the following table summarizes its performance at different training steps. Performance Metrics -------------------...
[]
[ "TAGS\n#transformers #pytorch #layoutlmv3 #token-classification #autotrain_compatible #endpoints_compatible #region-us \n" ]
[ 41 ]
[ "passage: TAGS\n#transformers #pytorch #layoutlmv3 #token-classification #autotrain_compatible #endpoints_compatible #region-us \n" ]
[ -0.05842987820506096, 0.029088284820318222, -0.008562782779335976, 0.03329263627529144, 0.17634959518909454, 0.03315509855747223, 0.06917909532785416, 0.09448128938674927, 0.005243215244263411, -0.029991144314408302, 0.12003349512815475, 0.2599136531352997, -0.028157811611890793, 0.1245651...
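LayoutLMv3 token classifiers consume a page image plus OCR tokens and boxes rather than plain text. A minimal sketch, assuming the checkpoint bundles its processor config and that `document.png` is a hypothetical local scan; the default LayoutLMv3 processor runs Tesseract OCR, which must be installed:

```python
# Minimal sketch for the LayoutLMv3 token classifier recorded above.
from PIL import Image
from transformers import AutoProcessor, AutoModelForTokenClassification

repo_id = "DataIntelligenceTeam/Tansport1.4"
processor = AutoProcessor.from_pretrained(repo_id)   # applies Tesseract OCR by default
model = AutoModelForTokenClassification.from_pretrained(repo_id)

image = Image.open("document.png").convert("RGB")    # hypothetical local scan
encoding = processor(image, return_tensors="pt")
predicted_ids = model(**encoding).logits.argmax(-1)
labels = [model.config.id2label[i] for i in predicted_ids[0].tolist()]
```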
null
null
null
# **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0**. To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
{"tags": ["Pixelcopter-PLE-v0", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class"], "model-index": [{"name": "Reinforce-Pixelcopter-PLE-v0", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "Pixelcopter-PLE-v0", "type": "Pixelco...
reinforcement-learning
AF6ECHO/Reinforce-Pixelcopter-PLE-v0
[ "Pixelcopter-PLE-v0", "reinforce", "reinforcement-learning", "custom-implementation", "deep-rl-class", "model-index", "region:us" ]
2023-11-12T18:55:56+00:00
[]
[]
TAGS #Pixelcopter-PLE-v0 #reinforce #reinforcement-learning #custom-implementation #deep-rl-class #model-index #region-us
# Reinforce Agent playing Pixelcopter-PLE-v0 This is a trained model of a Reinforce agent playing Pixelcopter-PLE-v0. To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: URL
[ "# Reinforce Agent playing Pixelcopter-PLE-v0\n This is a trained model of a Reinforce agent playing Pixelcopter-PLE-v0 .\n To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: URL" ]
[ "TAGS\n#Pixelcopter-PLE-v0 #reinforce #reinforcement-learning #custom-implementation #deep-rl-class #model-index #region-us \n", "# Reinforce Agent playing Pixelcopter-PLE-v0\n This is a trained model of a Reinforce agent playing Pixelcopter-PLE-v0 .\n To learn to use this model and train yours check Unit 4 of ...
[ 41, 58 ]
[ "passage: TAGS\n#Pixelcopter-PLE-v0 #reinforce #reinforcement-learning #custom-implementation #deep-rl-class #model-index #region-us \n# Reinforce Agent playing Pixelcopter-PLE-v0\n This is a trained model of a Reinforce agent playing Pixelcopter-PLE-v0 .\n To learn to use this model and train yours check Unit 4 ...
[ 0.0073175891302526, -0.2259262204170227, -0.0017347558168694377, 0.05054566636681557, 0.0658537745475769, -0.055378563702106476, 0.1412602812051773, 0.05916554853320122, -0.04990595206618309, 0.059261854737997055, 0.14166708290576935, 0.03996060788631439, 0.022112762555480003, 0.1513713151...
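The agent above follows the course's policy-gradient recipe. A generic textbook sketch of the REINFORCE loss such an agent optimizes (not the course's exact implementation):

```python
# Minimal sketch of a REINFORCE policy-gradient loss for one episode.
import torch

def reinforce_loss(log_probs, rewards, gamma=0.99):
    """log_probs: list of log pi(a_t | s_t) tensors; rewards: list of floats."""
    returns, g = [], 0.0
    for r in reversed(rewards):              # discounted return G_t, computed backwards
        g = r + gamma * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)  # crude baseline
    return -(torch.stack(log_probs) * returns).sum()
```

Normalizing the returns acts as a simple baseline and usually stabilizes training on sparse-reward environments like Pixelcopter.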
null
null
null
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # uplimit-project-3-phi-1.5 This model is a fine-tuned version of [microsoft/phi-1_5](https://huggingface.co/microsoft/phi-1_5) on...
{"license": "other", "tags": ["generated_from_trainer"], "datasets": ["scitldr"], "base_model": "microsoft/phi-1_5", "model-index": [{"name": "uplimit-project-3-phi-1.5", "results": []}]}
null
sergoumaya/uplimit-project-3-phi-1.5
[ "tensorboard", "safetensors", "generated_from_trainer", "dataset:scitldr", "base_model:microsoft/phi-1_5", "license:other", "region:us" ]
2023-11-12T18:57:20+00:00
[]
[]
TAGS #tensorboard #safetensors #generated_from_trainer #dataset-scitldr #base_model-microsoft/phi-1_5 #license-other #region-us
uplimit-project-3-phi-1.5 ========================= This model is a fine-tuned version of microsoft/phi-1\_5 on the scitldr dataset. It achieves the following results on the evaluation set: * Loss: 2.5338 Model description ----------------- More information needed Intended uses & limitations -----------------...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training...
[ "TAGS\n#tensorboard #safetensors #generated_from_trainer #dataset-scitldr #base_model-microsoft/phi-1_5 #license-other #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* s...
[ 46, 97, 4, 33 ]
[ "passage: TAGS\n#tensorboard #safetensors #generated_from_trainer #dataset-scitldr #base_model-microsoft/phi-1_5 #license-other #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.001\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n...
[ -0.10731098800897598, 0.020247573032975197, -0.0011501925764605403, 0.09952946752309799, 0.18258105218410492, 0.019899843260645866, 0.14197766780853271, 0.06880845129489899, -0.07539979368448257, 0.0654732882976532, 0.10352106392383575, 0.14995481073856354, 0.007693091407418251, 0.12170831...
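The hyperparameters listed in this record map directly onto transformers' `TrainingArguments`. A minimal sketch of that mapping, not the author's actual training script; the Adam betas and epsilon in the record match the library defaults:

```python
# Minimal sketch mirroring the recorded hyperparameters.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="uplimit-project-3-phi-1.5",
    learning_rate=1e-3,            # "learning_rate: 0.001" in the record
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1,            # "num_epochs: 1"
)
```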
null
null
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fine-tuned-led-base-book-summary This model is a fine-tuned version of [pszemraj/led-base-book-summary](https://huggingface.co/p...
{"license": "bsd-3-clause", "tags": ["generated_from_trainer"], "base_model": "pszemraj/led-base-book-summary", "model-index": [{"name": "fine-tuned-led-base-book-summary", "results": []}]}
text2text-generation
Narya-ai/fine-tuned-led-base-book-summary
[ "transformers", "safetensors", "led", "text2text-generation", "generated_from_trainer", "base_model:pszemraj/led-base-book-summary", "license:bsd-3-clause", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-12T19:01:34+00:00
[]
[]
TAGS #transformers #safetensors #led #text2text-generation #generated_from_trainer #base_model-pszemraj/led-base-book-summary #license-bsd-3-clause #autotrain_compatible #endpoints_compatible #region-us
fine-tuned-led-base-book-summary ================================ This model is a fine-tuned version of pszemraj/led-base-book-summary on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 2.5918 * Rouge2 Precision: 0.0778 * Rouge2 Recall: 0.1291 * Rouge2 Fmeasure: 0.0958 Model d...
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-06\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 4\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e...
[ "TAGS\n#transformers #safetensors #led #text2text-generation #generated_from_trainer #base_model-pszemraj/led-base-book-summary #license-bsd-3-clause #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learn...
[ 73, 141, 4, 30 ]
[ "passage: TAGS\n#transformers #safetensors #led #text2text-generation #generated_from_trainer #base_model-pszemraj/led-base-book-summary #license-bsd-3-clause #autotrain_compatible #endpoints_compatible #region-us \n### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* le...
[ -0.1295376718044281, 0.054822031408548355, -0.0012364510912448168, 0.06101413071155548, 0.14621852338314056, -0.0010982821695506573, 0.11427124589681625, 0.1317196488380432, -0.13666890561580658, 0.06607465445995331, 0.10685636103153229, 0.08442993462085724, 0.038446132093667984, 0.1578665...
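The LED checkpoint above targets long-document summarization. A minimal usage sketch, assuming the model is public on the Hub; `long_text` stands in for a book-length passage:

```python
# Minimal sketch: summarizing a long document with the recorded checkpoint.
from transformers import pipeline

summarizer = pipeline("summarization", model="Narya-ai/fine-tuned-led-base-book-summary")
long_text = "..."  # placeholder for a book-length passage
print(summarizer(long_text, max_length=256, min_length=32)[0]["summary_text"])
```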
null
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Info...
{"library_name": "peft", "base_model": "meta-llama/Llama-2-7b-hf"}
null
Mavitu56/LLamaEmergency
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:meta-llama/Llama-2-7b-hf", "region:us" ]
2023-11-12T19:04:51+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ...
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:"...
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:...
[ 41, 6, 3, 45, 28, 3, 4, 9, 9, 10, 42, 20, 3, 4, 5, 9, 11, 13, 3, 12, 5, 4, 5, 3, 4, 9, 53, 9, 8, 6, 3, 14, 8, 7, 9, 4, 164, 14 ]
[ "passage: TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-meta-llama/Llama-2-7b-hf #region-us \n# Model Card for Model ID## Model Details### Model Description\n\n\n\n\n\n- Developed by: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:### Model...
[ -0.09958921372890472, 0.17822016775608063, -0.00342088402248919, 0.03716764226555824, 0.08536183089017868, 0.02169986627995968, 0.05467161908745766, 0.12298179417848587, -0.04951082170009613, 0.09634580463171005, 0.06148029491305351, 0.10814239829778671, 0.09265368431806564, 0.188607484102...
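As with the DeciCoder record earlier, this is a PEFT adapter over a recorded base model. A minimal loading sketch, assuming the adapter repo is public; note that meta-llama/Llama-2-7b-hf is gated, so access to the base weights and a Hub login are required:

```python
# Minimal sketch: gated Llama-2 base + the recorded PEFT adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-2-7b-hf"        # gated: requires granted access
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, "Mavitu56/LLamaEmergency")
tokenizer = AutoTokenizer.from_pretrained(base_id)
```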