output0 = automatic_speech_recognition(audio="1284-1180-0003.flac")
output1 = question_answering(text=output0['text'], question="what place did the person from the audio wish to explore?")
from mnm.tool_api import automatic_speech_recognition, question_answering
audio_file_path = '/root/input_data/1284-1180-0003.flac'
transcription = automatic_speech_recognition(audio=audio_file_path)
transcribed_text = transcription['text']
question = "What place did the person from the audio wish to explore?"
answer = question_answering(question=question, text=transcribed_text)
automatic_speech_recognition
question_answering
automatic_speech_recognition
question_answering
output0 = optical_character_recognition(image="COCO_train2014_000000256855.jpg")
output1 = wikipedia_simple_search(text=output0['text'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import optical_character_recognition, wikipedia_simple_search, text_classification
output0 = optical_character_recognition('/root/input_data/COCO_train2014_000000256855.jpg')
output1 = wikipedia_simple_search(output0['text'])
output2 = text_classification(output1['text'])
optical_character_recognition
wikipedia_simple_search
text_classification
optical_character_recognition
wikipedia_simple_search
text_classification
output0 = wikipedia_simple_search(text="What is monkeypox")
output1 = text_generation(text=f"create a compelling storyline about Monkeypox based on {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import wikipedia_simple_search, text_generation, text_classification
output0 = wikipedia_simple_search("Monkeypox")
output1 = text_generation(output0['text'])
output2 = text_classification(output0['text'])
wikipedia_simple_search
text_generation
text_classification
wikipedia_simple_search
text_generation
text_classification
output0 = image_classification(image="14021.jpg")
output1 = text_generation(text=f"a fun story relating to the object or creature identified in {output0['text']}")
output2 = question_answering(text=output1['text'], question="What adventure did the main character of the story go on?")
from PIL import Image
from mnm.tool_api import image_classification, text_generation, question_answering
# Load the image using PIL
image_path = '/root/input_data/14021.jpg'
image = Image.open(image_path)
# Classify the image
classification_result = image_classification(image)
# Generate a fun story related to the classified object 'gasmask, respirator, gas helmet'
prompt = "In a post-apocalyptic world, a lone survivor finds an old gasmask. This gasmask isn't ordinary; it has a story to tell. The survivor puts it on and suddenly, "
story_result = text_generation(prompt)
# The question about the main character's adventure
question = "What adventure did the main character of the story go on?"
# Answer the question based on the story text
adventure_result = question_answering(question, story_result['text'])
image_classification
text_generation
question_answering
image_classification
text_generation
question_answering
output0 = get_date_fact(date="20-Jul")
output1 = text_generation(text=f"an engaging anecdote based on {output0['text']}")
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import get_date_fact, text_generation, text_summarization
facts_about_july_20 = get_date_fact("July 20")
anecdote = text_generation(f"On July 20, {facts_about_july_20['year']}, a significant event unfolded: {facts_about_july_20['text']}. Let me tell you a story about that day.")
brief_anecdote = text_summarization(anecdote['text'])
get_date_fact
text_generation
text_summarization
get_date_fact
text_generation
text_summarization
output0 = get_date_fact(date="11-Jul")
output1 = text_summarization(text=output0['text'])
output2 = image_generation(text=f"an engaging visual representation related to {output1['text']}")
from mnm.tool_api import get_date_fact, text_summarization, image_generation
output0 = get_date_fact("July 11")
output1 = text_summarization(output0['text'])
output2 = image_generation(output1['text'])
get_date_fact
text_summarization
image_generation
get_date_fact
text_summarization
image_generation
output0 = image_captioning(image="2336649.jpg")
output1 = text_generation(text=f"Summarize the text: {output0['text']}")
output2 = text_generation(text=f"a narrative continuation of {output1['text']}")
from mnm.tool_api import image_captioning, text_generation
from PIL import Image
import io
# Load the image and convert it to a PIL image
image_path = '/root/input_data/2336649.jpg'
with open(image_path, 'rb') as image_file:
image_data = image_file.read()
image = Image.open(io.BytesIO(image_data))
# Generate a caption for the image
output0 = image_captioning(image)
# Use the summary from the image captioning as the prompt for text generation
summary_text = output0['text']
# Generate a continuation of the scene's narrative
output1 = text_generation(summary_text)
image_captioning
text_generation
text_generation
image_captioning
text_generation
output0 = get_date_fact(date="7-May")
output1 = text_classification(text=output0['text'])
from mnm.tool_api import get_date_fact, text_classification
fact_result = get_date_fact("05/07")
sentiment_result = text_classification(fact_result['text'])
get_date_fact
text_classification
get_date_fact
text_classification
output0 = text_generation(text="a brochure cover write-up that captures the spirit of the School programme aimed at creating a generation of high-tech entrepreneurs.")
output1 = image_generation(text=f"a vibrant image to represent {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="add a group of diverse young people working together on a high-tech project")
from mnm.tool_api import text_generation, image_generation, image_editing
output0 = text_generation(text="Create a generation of high-tech entrepreneurs through a dedicated school programme that fosters innovation, collaboration, and technological expertise among young students.")
output1 = image_generation(text=output0['text'])
output2 = image_editing(image=output1['image'], prompt="Add a group of diverse young people working together on a high-tech project to the image.")
text_generation
image_generation
image_editing
text_generation
image_generation
image_editing
output0 = image_generation(text="an image depicting a woman worker looking at rear view mirror smiling")
output1 = image_captioning(image=output0['image'])
from mnm.tool_api import image_generation, image_captioning
output0 = image_generation(text='Women worker looking at rear view mirror smiling')
output1 = image_captioning(image=output0['image'])
image_generation
image_captioning
image_generation
image_captioning
output0 = get_year_fact(year="2014")
output1 = image_generation(text=f"an image representation of {output0['text']}")
output2 = object_detection(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, object_detection
year_fact = get_year_fact("2014")
generated_image = image_generation(year_fact['text'])
detected_objects = object_detection(generated_image['image'])
get_year_fact
image_generation
object_detection
get_year_fact
image_generation
object_detection
output0 = search_movie(movie_title="Moonlight", movie_year="2016")
from mnm.tool_api import search_movie
movie_info = search_movie(movie_title="Moonlight", movie_year="2016")
search_movie
search_movie
output0 = love_calculator(first_name="Edward Cullen", second_name="Bella Swan")
output1 = get_trivia_fact(number=output0['number'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import love_calculator, get_trivia_fact, text_classification
output0 = love_calculator('Edward Cullen', 'Bella Swan')
output1 = get_trivia_fact(output0['number'])
output2 = text_classification(output1['text'])
love_calculator
get_trivia_fact
text_classification
love_calculator
get_trivia_fact
text_classification
output0 = get_date_fact(date="20-Jul")
output1 = text_generation(text=f"a vivid, elaborate narrative based on {output0['text']}")
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import get_date_fact, text_generation, text_summarization
output0 = get_date_fact(date="July 20")
output1 = text_generation(text=output0['text'])
output2 = text_summarization(text=output1['text'])
get_date_fact
text_generation
text_summarization
get_date_fact
text_generation
text_summarization
output0 = image_segmentation(image="2353266.jpg")
output1 = count(objects=output0['objects'])
from mnm.tool_api import image_segmentation, count
segmentation_result = image_segmentation(image_path='/root/input_data/2353266.jpg')
number_of_sections = count(segmentation_result['objects'])
image_segmentation
count
image_segmentation
count
output0 = get_date_fact(date="4-May")
output1 = text_summarization(text=output0['text'])
output2 = wikipedia_simple_search(text=output1['text'])
from mnm.tool_api import get_date_fact, text_summarization, wikipedia_simple_search
output0 = get_date_fact("May 4")
output1 = text_summarization(output0['text'])
output2 = wikipedia_simple_search(output1['text'])
get_date_fact
text_summarization
wikipedia_simple_search
get_date_fact
text_summarization
wikipedia_simple_search
output0 = image_generation(text="An image focused on the upper portion of a woman wearing glasses posing behind her birthday cake.")
output1 = image_crop_top(image=output0['image'])
output2 = object_detection(image=output1['image'])
from mnm.tool_api import image_generation, image_crop_top, object_detection
output0 = image_generation(text="a woman wearing glasses posing behind her birthday cake")
output1 = image_crop_top(image=output0['image'])
output2 = object_detection(image=output1['image'])
image_generation
image_crop_top
object_detection
image_generation
image_crop_top
object_detection
output0 = get_trivia_fact(number="2")
output1 = text_generation(text=f"Summarize the text: {output0['text']}")
output2 = text_generation(text=f"an enlightening and detailed elaboration on {output1['text']}")
from mnm.tool_api import get_trivia_fact, text_generation
fact_about_number_2 = get_trivia_fact("2")
elaboration_about_number_2 = text_generation(fact_about_number_2['text'])
get_trivia_fact
text_generation
text_generation
get_trivia_fact
text_generation
output0 = text_generation(text="a text that outlines common Christmas preparations based on the prompt \"Poll: How are you preparing for Christmas?\"")
output1 = image_generation(text=f"a visual depiction of {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import text_generation, image_generation, image_segmentation
output0 = text_generation(text='Poll: How are you preparing for Christmas?')
output1 = image_generation(text='Decorating the house for Christmas with lights and a tree')
output2 = image_segmentation(image=output1['image'])
text_generation
image_generation
image_segmentation
text_generation
image_generation
image_segmentation
output0 = get_date_fact(date="19-Oct")
output1 = image_generation(text=f"a visual representation of {output0['text']}")
from mnm.tool_api import get_date_fact, image_generation
output0 = get_date_fact("October 19")
output1 = image_generation(text=output0['text'])
get_date_fact
image_generation
get_date_fact
image_generation
output0 = text_classification(text="it is amusing , and that 's all it needs to be . ")
from mnm.tool_api import text_classification
sentiment_result = text_classification("it is amusing , and that 's all it needs to be .")
text_classification
text_classification
output0 = image_editing(image="111376-input.png", prompt="Have the cow wear a hat.")
output1 = image_crop_left(image=output0['image'])
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_editing, image_crop_left, image_captioning
output0 = image_editing(image='/root/input_data/111376-input.png', prompt="a cow wearing a hat")
output1 = image_crop_left(image=output0['image'])
output2 = image_captioning(image=output1['image'])
image_editing
image_crop_left
image_captioning
image_editing
image_crop_left
image_captioning
output0 = search_movie(movie_title="CODA", movie_year="2021")
output1 = text_generation(text=f"a comprehensive review of the movie, CODA, based on {output0['text']}")
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import search_movie, text_generation
output0 = search_movie(movie_title='CODA', movie_year='2021')
output1 = text_generation(text=f"Write a comprehensive review for the movie titled 'CODA', released in 2021. Here are some details: {output0['text']}")
output2 = text_generation(text=f"Condense the following comprehensive review into a brief, critical overview suitable for a blog post: {output1['text']}")
search_movie
text_generation
text_generation
search_movie
text_generation
text_generation
output0 = get_year_fact(year="2023")
output1 = text_generation(text=f"a captivating introduction for a blog post based on {output0['text']}")
output2 = image_generation(text=f"a creative illustration that matches {output1['text']}")
from mnm.tool_api import get_year_fact, text_generation, image_generation
year_facts = get_year_fact('2023')
intro_prompt = "In 2023, a remarkable engineering feat was accomplished as the Istanbul Canal reached completion. This event not only reshapes the geography but also marks a significant milestone in the annals of history. Let's delve into the year 2023, a year of innovation and progress, and explore the significant events that are anticipated to unfold."
introduction = text_generation(intro_prompt)
illustration = image_generation(intro_prompt)
get_year_fact
text_generation
image_generation
get_year_fact
text_generation
image_generation
output0 = image_captioning(image="11665.jpg")
output1 = text_generation(text=f"a comprehensive narrative based on {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import image_captioning, text_generation, text_classification
caption_result = image_captioning(image='/root/input_data/11665.jpg')
narrative_result = text_generation(text=caption_result['text'])
sentiment_result = text_classification(text=narrative_result['text'])
image_captioning
text_generation
text_classification
image_captioning
text_generation
text_classification
output0 = text_generation(text="a possible dialogue or continuation that could come naturally after \"You've got to ask yourself one question: Do I feel lucky?\"")
output1 = text_summarization(text=output0['text'])
from mnm.tool_api import text_generation, text_summarization
generated_dialogue = text_generation(text="You've got to ask yourself one question: Do I feel lucky?")
summary = text_summarization(text=generated_dialogue['text'])
text_generation
text_summarization
text_generation
text_summarization
output0 = question_answering(text="These chloroplasts, which can be traced back directly to a cyanobacterial ancestor, are known as primary plastids (\"plastid\" in this context means almost the same thing as chloroplast). All primary chloroplasts belong to one of three chloroplast lineages—the glaucophyte chloroplast lineage, the rhodophyte, or red algal chloroplast lineage, or the chloroplastidan, or green chloroplast lineage. The second two are the largest, and the green chloroplast lineage is the one that contains the land plants.", question="What does 'plastid' mean?")
from mnm.tool_api import question_answering
output0 = question_answering(question="What does 'plastid' mean?", text="These chloroplasts, which can be traced back directly to a cyanobacterial ancestor, are known as primary plastids ('plastid' in this context means almost the same thing as chloroplast). All primary chloroplasts belong to one of three chloroplast lineages—the glaucophyte chloroplast lineage, the rhodophyte, or red algal chloroplast lineage, or the chloroplastidan, or green chloroplast lineage. The second two are the largest, and the green chloroplast lineage is the one that contains the land plants.")
question_answering
question_answering
output0 = image_generation(text="a game environment based on a bathroom with a sink and television in it with a specific focus on the left side")
output1 = image_crop_left(image=output0['image'])
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import image_generation, image_crop_left, optical_character_recognition
output0 = image_generation("a bathroom with a sink and television in it")
output1 = image_crop_left(output0['image'])
output2 = optical_character_recognition(output1['image'])
image_generation
image_crop_left
optical_character_recognition
image_generation
image_crop_left
optical_character_recognition
output0 = get_trivia_fact(number="3")
output1 = image_generation(text=f"an image that visually symbolizes the interesting fact about the number 3 from {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_trivia_fact, image_generation, image_segmentation
fact_about_number_3 = get_trivia_fact("3")
generated_image = image_generation(fact_about_number_3['text'])
segmented_components = image_segmentation(generated_image['image'])
get_trivia_fact
image_generation
image_segmentation
get_trivia_fact
image_generation
image_segmentation
output0 = image_editing(image="270650-input.png", prompt="put skis on the wheel")
output1 = image_crop_bottom(image=output0['image'])
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_editing, image_crop_bottom, image_captioning
edited_image_result = image_editing(image='/root/input_data/270650-input.png', prompt='Add skis to the wheel of the vehicle in the photo.')
cropped_image_result = image_crop_bottom(image=edited_image_result['image'])
caption_result = image_captioning(image=cropped_image_result['image'])
image_editing
image_crop_bottom
image_captioning
image_editing
image_crop_bottom
image_captioning
output0 = image_classification(image="12633.jpg")
output1 = image_generation(text=f"a new stylized image of {output0['text']} for an upcoming presentation")
from mnm.tool_api import image_classification, image_generation
classification_result = image_classification(image='/root/input_data/12633.jpg')
stylized_image_result = image_generation(text=classification_result['text'])
image_classification
image_generation
image_classification
image_generation
output0 = get_trivia_fact(number="8")
from mnm.tool_api import get_trivia_fact
fact_about_number_8 = get_trivia_fact("8")
get_trivia_fact
get_trivia_fact
output0 = image_generation(text="An image of a woman playing with a video game controller for an invitation card.")
output1 = optical_character_recognition(image=output0['image'])
from mnm.tool_api import image_generation, optical_character_recognition
output0 = image_generation(text="a woman playing with a video game controller")
output1 = optical_character_recognition(image=output0['image'])
image_generation
optical_character_recognition
image_generation
optical_character_recognition
output0 = get_math_fact(number="7")
output1 = image_generation(text=f"a fun and festive birthday image using {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="Overlay big, bold, and colourful 'Happy 7th Birthday!' text at the top of the image.")
from mnm.tool_api import get_math_fact, image_generation, image_editing
math_fact = get_math_fact('7')
description = "A birthday celebration with balloons, cake, and the number 7, which is the only dimension, besides the familiar 3, in which a vector cross product can be defined."
birthday_image = image_generation(description)
overlay_prompt = "Overlay big, bold, and colourful 'Happy 7th Birthday!' text at the top of the image."
final_image = image_editing(birthday_image['image'], overlay_prompt)
get_math_fact
image_generation
image_editing
get_math_fact
image_generation
image_editing
output0 = automatic_speech_recognition(audio="237-134500-0000.flac")
output1 = text_generation(text=f"a story that could naturally follow the conversation in {output0['text']}")
from mnm.tool_api import automatic_speech_recognition, text_generation
transcription = automatic_speech_recognition(audio='/root/input_data/237-134500-0000.flac')
story = text_generation(text=transcription['text'])
automatic_speech_recognition
text_generation
automatic_speech_recognition
text_generation
output0 = image_editing(image="270650-input.png", prompt="put skis on the wheel")
output1 = image_crop_bottom(image=output0['image'])
from mnm.tool_api import image_editing, image_crop_bottom
edited_image_result = image_editing(image='/root/input_data/270650-input.png', prompt="Add skis on the wheel.")
cropped_image_result = image_crop_bottom(image=edited_image_result['image'])
image_editing
image_crop_bottom
image_editing
image_crop_bottom
output0 = search_movie(movie_title="Everything Everywhere All at Once", movie_year="2022")
output1 = text_generation(text=f"a text description of an imaginary sequel based on {output0['text']}")
output2 = image_generation(text=f"a promotional poster design for the hypothetical sequel described in {output1['text']}")
from mnm.tool_api import search_movie, text_generation, image_generation
output0 = search_movie(movie_title="Everything Everywhere All at Once", movie_year="2022")
output1 = text_generation(text=output0['text'])
output2 = image_generation(text=output1['text'])
search_movie
text_generation
image_generation
search_movie
text_generation
image_generation
output0 = object_detection(image="1592452.jpg")
from PIL import Image
from mnm.tool_api import object_detection
image = Image.open('/root/input_data/1592452.jpg')
output0 = object_detection(image)
object_detection
object_detection
output0 = get_date_fact(date="4-May")
output1 = image_generation(text=f"a unique visual representation of {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import get_date_fact, image_generation, image_classification
output0 = get_date_fact("May 4th")
output1 = image_generation(output0['text'])
output2 = image_classification(output1['image'])
get_date_fact
image_generation
image_classification
get_date_fact
image_generation
image_classification
output0 = get_year_fact(year="2020")
output1 = image_generation(text=f"a striking representation of {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, optical_character_recognition
output0 = get_year_fact('2020')
output1 = image_generation(output0['text'])
output2 = optical_character_recognition(output1['image'])
get_year_fact
image_generation
optical_character_recognition
get_year_fact
image_generation
optical_character_recognition
output0 = search_movie(movie_title="12 Years a Slave", movie_year="2013")
output1 = text_generation(text=f"a brief summary based on the detailed information of the film from {output0['text']}")
output2 = image_generation(text=f"an intriguing image representation inspired by the summary from {output1['text']}")
from mnm.tool_api import search_movie, text_generation, image_generation
output0 = search_movie(movie_title='12 Years a Slave', movie_year='2013')
output1 = text_generation(text=output0['text'])
output2 = image_generation(text=output1['text'])
search_movie
text_generation
image_generation
search_movie
text_generation
image_generation
output0 = get_weather(lon="17.14", lat="32.81")
from mnm.tool_api import get_weather
weather_forecast = get_weather(lon="17.14", lat="32.81")
get_weather
get_weather
output0 = automatic_speech_recognition(audio="2300-131720-0000.flac")
output1 = text_summarization(text=output0['text'])
from mnm.tool_api import automatic_speech_recognition, text_summarization
transcription = automatic_speech_recognition(audio='/root/input_data/2300-131720-0000.flac')
summary = text_summarization(text=transcription['text'])
summary_text = summary['text']
automatic_speech_recognition
text_summarization
automatic_speech_recognition
text_summarization
output0 = image_captioning(image="17464.jpg")
output1 = text_generation(text=f"a captivating text about {output0['text']}")
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import image_captioning, text_generation, text_summarization
output0 = image_captioning(image='/root/input_data/17464.jpg')
output1 = text_generation(text=output0['text'])
output2 = text_summarization(text=output1['text'])
image_captioning
text_generation
text_summarization
image_captioning
text_generation
text_summarization
output0 = search_movie(movie_title="12 Years a Slave", movie_year="2013")
output1 = text_generation(text=f"a retrospective highlighting the main plot twists of {output0['text']}")
output2 = image_generation(text=f"an image that evokes the mood and elements from {output1['text']}")
from mnm.tool_api import search_movie, text_generation, image_generation
output0 = search_movie(movie_title='12 Years a Slave', movie_year='2013')
output1 = text_generation(text="Retrospective of the movie '12 Years a Slave':")
output2 = image_generation(text=output1['text'])
search_movie
text_generation
image_generation
search_movie
text_generation
image_generation
output0 = get_year_fact(year="2023")
from mnm.tool_api import get_year_fact
output0 = get_year_fact("2023")
get_year_fact
get_year_fact
output0 = image_captioning(image="2350021.jpg")
output1 = text_summarization(text=output0['text'])
output2 = wikipedia_simple_search(text=output1['text'])
from PIL import Image
from mnm.tool_api import image_captioning, text_summarization, wikipedia_simple_search
# Load the image and generate a description
image = Image.open('/root/input_data/2350021.jpg')
description = image_captioning(image)
# Summarize the description
summary = text_summarization(description['text'])
# Find the most relevant Wikipedia page for the context
wikipedia_result = wikipedia_simple_search(summary['text'])
image_captioning
text_summarization
wikipedia_simple_search
image_captioning
text_summarization
wikipedia_simple_search
output0 = search_movie(movie_title="Argo", movie_year="2012")
output1 = text_summarization(text=output0['text'])
output2 = image_generation(text=f"a representative image that illustrates the essence of {output1['text']}")
from mnm.tool_api import search_movie, text_summarization, image_generation
output0 = search_movie(movie_title="Argo", movie_year="2012")
output1 = text_summarization(text=output0['text'].split('Plot: ')[1].strip())
output2 = image_generation(text=output1['text'])
search_movie
text_summarization
image_generation
search_movie
text_summarization
image_generation
output0 = object_detection(image="2382602.jpg")
output1 = select_object(objects=output0['objects'], object_name="motorcycle")
output2 = emoji(image="2382602.jpg", object=output1['object'], emoji="goblin")
from mnm.tool_api import object_detection, select_object, emoji
detection_result = object_detection(image='/root/input_data/2382602.jpg')
motorcycle_object = select_object(objects=detection_result['objects'], object_name='motorcycle')
result_image = emoji(image='/root/input_data/2382602.jpg', object=motorcycle_object['object'], emoji='goblin')
object_detection
select_object
emoji
object_detection
select_object
emoji
output0 = search_movie(movie_title="Argo", movie_year="2012")
output1 = text_generation(text=f"a convincing movie review based on {output0['text']} as if you personally watched it")
output2 = text_generation(text=output1['text'])
from mnm.tool_api import search_movie, text_generation
movie_info = search_movie(movie_title='Argo', movie_year='2012')
movie_review = text_generation(text=f"I just watched {movie_info['text']} It was an amazing experience!")
search_movie
text_generation
text_generation
search_movie
text_generation
output0 = image_editing(image="417814-input.png", prompt="Let the carpet be changed to wooden floor.")
output1 = object_detection(image=output0['image'])
output2 = count(objects=output1['objects'])
from PIL import Image
from mnm.tool_api import image_editing, object_detection, count
# Open the image using PIL
image_path = '/root/input_data/417814-input.png'
image = Image.open(image_path)
# Edit the image to replace the carpet with a wooden floor
output0 = image_editing(image, "Replace the carpet with a wooden floor.")
# Detect objects in the edited image
output1 = object_detection(output0['image'])
# Count the total number of objects in the edited image
output2 = count(output1['objects'])
image_editing
object_detection
count
image_editing
object_detection
count
output0 = image_segmentation(image="2385634.jpg")
from PIL import Image
from mnm.tool_api import image_segmentation
import numpy as np
import os
# Load the image using PIL
image_path = '/root/input_data/2385634.jpg'
image = Image.open(image_path)
# Perform image segmentation to get masks for all objects
segmentation_result = image_segmentation(image)
# Find the segmentation mask for the oven
oven_mask = None
for obj in segmentation_result['objects']:
if obj['label'] == 'oven':
oven_mask = obj['mask']
break
# Create a new image that highlights the oven by masking out the rest
oven_mask_image = np.repeat(oven_mask[:, :, np.newaxis], 3, axis=2)
oven_only_image = np.where(oven_mask_image, np.array(image), 0)
# Convert the oven_only_image numpy array to a PIL image
oven_only_pil_image = Image.fromarray(oven_only_image.astype(np.uint8))
# Define the output path for the oven-only image
output_image_path = '/root/output_data/oven_only_image.jpg'
# Create the output directory if it does not exist
os.makedirs(os.path.dirname(output_image_path), exist_ok=True)
# Save the oven-only image to the output path
oven_only_pil_image.save(output_image_path)
image_segmentation
image_segmentation
output0 = love_calculator(first_name="William Thacker", second_name="Anna Scott")
output1 = get_math_fact(number=output0['number'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import love_calculator, get_math_fact, text_classification
output0 = love_calculator(first_name="William Thacker", second_name="Anna Scott")
output1 = get_math_fact(number="random")
output2 = text_classification(text=output1['text'])
love_calculator
get_math_fact
text_classification
love_calculator
get_math_fact
text_classification
output0 = get_math_fact(number="3")
output1 = text_generation(text=f"an engaging and comprehensive lesson plan based on mathematical details of {output0['text']}")
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import get_math_fact, text_generation
output0 = get_math_fact('3')
output1 = text_generation("Create a comprehensive and engaging lesson plan for students about the number 3. Start with the fact that a triangle is the only figure which, if all endpoints have hinges, will never change its shape unless the sides themselves are bent. Include activities and discussion points that help students explore the significance of the number 3 in mathematics.")
get_math_fact
text_generation
text_generation
get_math_fact
text_generation
output0 = visual_question_answering(image="2375698.jpg", question="Which place is it?")
output1 = image_generation(text=f"an image that represents the cultural aspects or landmarks of {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_captioning, image_generation, visual_question_answering
image_path = '/root/input_data/2375698.jpg'
caption_result = image_captioning(image_path)
question_about_location = "Which city is this train station located in?"
location_result = visual_question_answering(image_path, question_about_location)
generated_image_result = image_generation('Cultural landmarks of London')
new_image_caption_result = image_captioning(generated_image_result['image'])
visual_question_answering
image_generation
image_captioning
image_captioning
visual_question_answering
image_generation
image_captioning
output0 = get_date_fact(date="22-Mar")
output1 = image_generation(text=f"an image that accurately portrays the fascinating occurrence of {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="include a small calendar on the top right corner, highlighting the date '22-Mar'")
from mnm.tool_api import get_date_fact, image_generation, image_editing
date_fact = get_date_fact("2023-03-22")
event_image = image_generation(date_fact['text'])
final_image = image_editing(event_image['image'], "Add a small calendar on the top right corner, highlighting the date '22-Mar'.")
get_date_fact
image_generation
image_editing
get_date_fact
image_generation
image_editing
output0 = image_generation(text="A detailed image of the left portion of a hotel room with a bed and a chair and white walls.")
output1 = image_crop_left(image=output0['image'])
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_generation, image_crop_left, image_captioning
output0 = image_generation(text="A hotel room with a bed and a chair and white walls")
output1 = image_crop_left(output0['image'])
output2 = image_captioning(output1['image'])
image_generation
image_crop_left
image_captioning
image_generation
image_crop_left
image_captioning
output0 = optical_character_recognition(image="COCO_train2014_000000028742.jpg")
from mnm.tool_api import optical_character_recognition
output0 = optical_character_recognition(image='/root/input_data/COCO_train2014_000000028742.jpg')
optical_character_recognition
optical_character_recognition
output0 = image_classification(image="11899.jpg")
output1 = wikipedia_simple_search(text=output0['text'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import image_classification, wikipedia_simple_search, text_classification
output0 = image_classification(image='/root/input_data/11899.jpg')
output1 = wikipedia_simple_search(text=output0['text'])
output2 = text_classification(text=output1['text'])
image_classification
wikipedia_simple_search
text_classification
image_classification
wikipedia_simple_search
text_classification
output0 = automatic_speech_recognition(audio="237-134500-0000.flac")
output1 = image_generation(text=f"a visual depiction of Frank talking about a divorce case that really angers him with reference to {output0['text']}")
output2 = object_detection(image=output1['image'])
from mnm.tool_api import automatic_speech_recognition, image_generation, object_detection
output0 = automatic_speech_recognition(audio='/root/input_data/237-134500-0000.flac')
output1 = image_generation(text=output0['text'])
output2 = object_detection(image=output1['image'])
automatic_speech_recognition
image_generation
object_detection
automatic_speech_recognition
image_generation
object_detection
output0 = get_math_fact(number="7")
output1 = image_generation(text=f"a relevant image that illustrates the unique mathematical fact about the number 7 noted in {output0['text']}")
output2 = image_crop_top(image=output1['image'])
from mnm.tool_api import get_math_fact, image_generation, image_crop_top
output0 = get_math_fact(number="7")
output1 = image_generation(text=output0['text'])
output2 = image_crop_top(image=output1['image'])
get_math_fact
image_generation
image_crop_top
get_math_fact
image_generation
image_crop_top
output0 = get_date_fact(date="11-Jul")
output1 = text_generation(text=f"a fun and heartwarming birthday wish message related to {output0['text']}")
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import get_date_fact, text_generation
date_fact = get_date_fact("07/11")
birthday_wish_prompt = f"Did you know? {date_fact['text']}. Now, here's a birthday wish for you: "
birthday_wish = text_generation(birthday_wish_prompt)
get_date_fact
text_generation
text_generation
get_date_fact
text_generation
output0 = love_calculator(first_name="Romeo Montague", second_name="Juliet Capulet")
output1 = get_trivia_fact(number=output0['number'])
output2 = image_generation(text=f"a creatively designed image which portrays the fact associated with {output1['text']}")
from mnm.tool_api import love_calculator, get_trivia_fact, image_generation
output0 = love_calculator("Romeo Montague", "Juliet Capulet")
output1 = get_trivia_fact(output0["number"])
output2 = image_generation(output1["text"])
love_calculator
get_trivia_fact
image_generation
love_calculator
get_trivia_fact
image_generation
output0 = get_date_fact(date="20-Jul")
output1 = text_generation(text=f"a brief creative narrative on the interesting fact that happened on 20th July: {output0['text']}")
output2 = question_answering(text=output1['text'], question="Who was the key person involved in the event that happened on 20th July?")
from mnm.tool_api import get_date_fact, text_generation, question_answering
output0 = get_date_fact("07/20")
output1 = text_generation(f"On 20th July 1977, {output0['text']}.")
output2 = question_answering("Who was the main person involved in the event?", output1['text'])
get_date_fact
text_generation
question_answering
get_date_fact
text_generation
question_answering
output0 = get_trivia_fact(number="3")
output1 = text_generation(text=f"a child-friendly story that incorporates the trivia fact: {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import get_trivia_fact, text_generation, text_classification
trivia_fact = get_trivia_fact("3")
story_prompt = f"Once upon a time in a world with {trivia_fact['text']}, "
story = text_generation(story_prompt)
sentiment_result = text_classification(story['text'])
get_trivia_fact
text_generation
text_classification
get_trivia_fact
text_generation
text_classification
output0 = get_year_fact(year="2023")
output1 = image_generation(text=f"an illustrative picture based on {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, optical_character_recognition
output0 = get_year_fact("2023")
output1 = image_generation("Istanbul Canal will be finished")
output2 = optical_character_recognition(output1['image'])
get_year_fact
image_generation
optical_character_recognition
get_year_fact
image_generation
optical_character_recognition
output0 = get_date_fact(date="11-Jul")
from mnm.tool_api import get_date_fact
date_facts = get_date_fact("July 11")
get_date_fact
get_date_fact
output0 = image_generation(text="a vivid, generated image displaying a chef pulling a pizza out of a pizza oven with bustling activities")
output1 = image_crop_left(image=output0['image'])
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import image_generation, image_crop_left, image_segmentation
output0 = image_generation(text="a chef pulling a pizza out of a pizza oven")
output1 = image_crop_left(image=output0['image'])
output2 = image_segmentation(image=output1['image'])
image_generation
image_crop_left
image_segmentation
image_generation
image_crop_left
image_segmentation
output0 = search_movie(movie_title="Birdman or (The Unexpected Virtue of Ignorance)", movie_year="2014")
output1 = text_generation(text=f"a brief introduction for the film discussion based on {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import search_movie, text_generation, text_classification
output0 = search_movie("Birdman or (The Unexpected Virtue of Ignorance)", "2014")
output1 = text_generation(f"Let's discuss the movie titled '{output0['text']}' which was released in 2014. ")
output2 = text_classification(output1['text'])
search_movie
text_generation
text_classification
search_movie
text_generation
text_classification
output0 = get_math_fact(number="9")
output1 = image_generation(text=f"a relevant image depicting the intriguing math fact about number 9: {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import get_math_fact, image_generation, image_captioning
math_fact_about_9 = get_math_fact('9')
description_for_image = "9 is a composite number, its proper divisors being 1 and 3"
generated_image = image_generation(description_for_image)
caption_for_image = image_captioning(generated_image['image'])
get_math_fact
image_generation
image_captioning
get_math_fact
image_generation
image_captioning
output0 = automatic_speech_recognition(audio="1284-1180-0003.flac")
output1 = image_generation(text=f"a picturesque image of the land of Oz based on {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="hot air balloon floating above the scenery")
from mnm.tool_api import automatic_speech_recognition, image_generation, image_editing
transcription = automatic_speech_recognition(audio='/root/input_data/1284-1180-0003.flac')
generated_image = image_generation(text=transcription['text'])
edited_image = image_editing(image=generated_image['image'], prompt="Add a tiny hot air balloon floating above the scenery.")
automatic_speech_recognition
image_generation
image_editing
automatic_speech_recognition
image_generation
image_editing
output0 = get_date_fact(date="7-May")
from mnm.tool_api import get_date_fact
historical_fact = get_date_fact(date="05/07")
get_date_fact
get_date_fact
output0 = optical_character_recognition(image="COCO_train2014_000000113236.jpg")
output1 = text_generation(text=f"a detailed, contextual description about traffic rules related to {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import optical_character_recognition, text_generation, text_classification
output0 = optical_character_recognition(image='/root/input_data/COCO_train2014_000000113236.jpg')
output1 = text_generation(text=f"{output0['text']} Traffic rules")
output2 = text_classification(text=output1['text'])
optical_character_recognition
text_generation
text_classification
optical_character_recognition
text_generation
text_classification
output0 = love_calculator(first_name="Fitzwilliam Darcy", second_name="Elizabeth Bennet")
output1 = get_trivia_fact(number=output0['number'])
output2 = text_generation(text=f"a unique piece of fiction or text based on the romantic compatibility between Fitzwilliam Darcy and Elizabeth Bennet and an interesting trivia fact related to their compatibility number, {output1['text']}")
from mnm.tool_api import love_calculator, get_trivia_fact, text_generation
output0 = love_calculator(first_name='Fitzwilliam Darcy', second_name='Elizabeth Bennet')
output1 = get_trivia_fact(number=str(output0['number']))
output2 = text_generation(text=f"{output0['message']} Fitzwilliam Darcy and Elizabeth Bennet's love is as enduring as the {output1['text']}.")
love_calculator
get_trivia_fact
text_generation
love_calculator
get_trivia_fact
text_generation
output0 = image_generation(text="a unique illustration of a silver jet flying high in the sky above the clouds")
from mnm.tool_api import image_generation
description = "A silver jet is flying high in the sky above the clouds"
generated_image = image_generation(text=description)
image_generation
image_generation
output0 = text_generation(text="Tell me about Why were chainsaws invented")
output1 = text_generation(text=f"a detailed elaboration beyond the basic information about {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import text_generation, text_classification
elaboration_result = text_generation(text="Why were chainsaws invented? Provide a detailed elaboration on the history and original purpose of chainsaws.")
elaboration_text = elaboration_result['text']
tone_result = text_classification(text=elaboration_text)
text_generation
text_generation
text_classification
text_generation
text_classification
output0 = get_trivia_fact(number="9")
output1 = image_generation(text=f"a visual representation of {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_trivia_fact, image_generation, image_segmentation
fact_about_number_9 = get_trivia_fact('9')
visual_representation = image_generation(fact_about_number_9['text'])
segmentation_result = image_segmentation(visual_representation['image'])
get_trivia_fact
image_generation
image_segmentation
get_trivia_fact
image_generation
image_segmentation
output0 = image_generation(text="A visual representation of a man doing tricks with a frisbee")
output1 = object_detection(image=output0['image'])
from mnm.tool_api import image_generation, object_detection
generated_image_result = image_generation(text="A man is doing tricks with a frisbee")
detected_objects_result = object_detection(image=generated_image_result['image'])
# Code to find the central object is not included in the summary as it is a calculation based on the results of object_detection.
image_generation
object_detection
image_generation
object_detection
output0 = get_year_fact(year="2015")
output1 = image_generation(text=f"a suitable image that captures the essence of {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, optical_character_recognition
output0 = get_year_fact("2015")
output1 = image_generation(output0['text'])
output2 = optical_character_recognition(output1['image'])
get_year_fact
image_generation
optical_character_recognition
get_year_fact
image_generation
optical_character_recognition
output0 = image_editing(image="177572-input.png", prompt="make the woman hold a camera")
output1 = object_detection(image=output0['image'])
output2 = select_object(objects=output1['objects'], object_name="camera")
from mnm.tool_api import image_editing, object_detection, select_object
edited_image_result = image_editing(image='/root/input_data/177572-input.png', prompt="A woman holding a camera")
objects_detection_result = object_detection(image=edited_image_result['image'])
camera_object = select_object(objects=objects_detection_result['objects'], object_name="camera")
image_editing
object_detection
select_object
image_editing
object_detection
select_object
output0 = text_classification(text="forced , familiar and thoroughly condescending . ")
from mnm.tool_api import text_classification
sentiment_result = text_classification(text="forced, familiar and thoroughly condescending.")
text_classification
text_classification
output0 = image_editing(image="134597-input.png", prompt="put a party hat on the dog")
output1 = object_detection(image=output0['image'])
from mnm.tool_api import image_editing, object_detection
edited_image_result = image_editing(image='/root/input_data/134597-input.png', prompt='A dog with a party hat.')
edited_image_path = '/root/playground/134597-edited.png'
edited_image_result['image'].save(edited_image_path)
objects_detected_result = object_detection(image=edited_image_path)
image_editing
object_detection
image_editing
object_detection
output0 = get_weather(lon="95.39", lat="29.79")
from mnm.tool_api import get_weather
import requests
# Latitude and longitude for the specified region
latitude = "29.79"
longitude = "95.39"
# Call the get_weather function with the provided coordinates
try:
weather_info = get_weather(lon=longitude, lat=latitude)
print(weather_info)
except requests.exceptions.JSONDecodeError as e:
print("Failed to decode JSON from the weather API response:", e)
except Exception as e:
print("An error occurred while fetching the weather information:", e)
get_weather
get_weather
output0 = search_movie(movie_title="The Shape of Water", movie_year="2017")
output1 = text_generation(text=f"a narrative description that talks about the main plot following the pool worker's journey in {output0['text']}")
output2 = image_generation(text=f"an image that encapsulates the atmosphere and mood of the movie from {output1['text']}")
from mnm.tool_api import search_movie, text_generation, image_generation
movie_info = search_movie(movie_title='The Shape of Water', movie_year='2017')
narrative_description = text_generation(text="The main plot follows the journey of a pool worker in the movie 'The Shape of Water'.")
image_result = image_generation(text=narrative_description['text'])
search_movie
text_generation
image_generation
search_movie
text_generation
image_generation
output0 = image_generation(text="an illustration depicting \"A man lounges on a bench watching the ocean.\"")
output1 = image_crop_bottom(image=output0['image'])
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import image_generation, image_crop_bottom, optical_character_recognition
output0 = image_generation(text="A man lounges on a bench watching the ocean.")
output1 = image_crop_bottom(image=output0['image'])
output2 = optical_character_recognition(image=output1['image'])
image_generation
image_crop_bottom
optical_character_recognition
image_generation
image_crop_bottom
optical_character_recognition
output0 = image_classification(image="18404.jpg")
output1 = image_generation(text=f"a new image depicting the same subject as {output0['text']}")
output2 = image_crop_top(image=output1['image'])
from PIL import Image, ImageOps
import io
from mnm.tool_api import image_classification, image_generation, image_crop_top
# Load the image using PIL
image_path = '/root/input_data/18404.jpg'
with open(image_path, 'rb') as file:
image_data = file.read()
image = Image.open(io.BytesIO(image_data))
# Identify the subject in the image
classification_result = image_classification(image)
subject = classification_result['text']
# Generate a new image based on the subject
generation_result = image_generation(text=subject)
generated_image = generation_result['image']
# Crop the top portion of the newly generated image
cropped_image_result = image_crop_top(generated_image)
cropped_image = cropped_image_result['image']
image_classification
image_generation
image_crop_top
image_classification
image_generation
image_crop_top
output0 = get_trivia_fact(number="6")
output1 = image_generation(text=f"a vibrant, engaging image representing {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import get_trivia_fact, image_generation, image_captioning
output0 = get_trivia_fact("6")
output1 = image_generation("the number of points on a Star of David")
output2 = image_captioning(output1['image'])
get_trivia_fact
image_generation
image_captioning
get_trivia_fact
image_generation
image_captioning
output0 = object_detection(image="2413350.jpg")
output1 = count(objects=output0['objects'])
output2 = get_math_fact(number=output1['number'])
from mnm.tool_api import object_detection, count, get_math_fact
output0 = object_detection(image='/root/input_data/2413350.jpg')
output1 = count(objects=output0['objects'])
output2 = get_math_fact(number=str(output1['number']))
object_detection
count
get_math_fact
object_detection
count
get_math_fact
output0 = object_detection(image="11561.jpg")
output1 = image_generation(text=f"create a new image that represents the same concept as {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import object_detection, image_generation, image_captioning
image_path = '/root/input_data/11561.jpg'
image = Image.open(image_path)
detection_result = object_detection(image)
main_subject = detection_result['objects'][0]['label']
text_description = main_subject
generated_image_result = image_generation(text_description)
caption_result = image_captioning(generated_image_result['image'])
object_detection
image_generation
image_captioning
object_detection
image_generation
image_captioning
output0 = get_date_fact(date="19-Oct")
output1 = image_generation(text=f"graphic image based on {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_date_fact, image_generation, image_segmentation
date_fact = get_date_fact("October 19")
image_description = "Napoleon I of France retreats from Moscow"
generated_image_result = image_generation(image_description)
segmentation_result = image_segmentation(generated_image_result['image'])
get_date_fact
image_generation
image_segmentation
get_date_fact
image_generation
image_segmentation
output0 = image_generation(text="an illustration of a scene where a baseball player swung for the ball")
output1 = optical_character_recognition(image=output0['image'])
output2 = text_generation(text=f"a follow-up story context based on {output1['text']} to continue the narrative")
from mnm.tool_api import image_generation, optical_character_recognition, text_generation
output0 = image_generation(text='There is a baseball player who swung for the ball')
output1 = optical_character_recognition(image=output0['image'])
output2 = text_generation(text=output1['text'])
image_generation
optical_character_recognition
text_generation
image_generation
optical_character_recognition
text_generation
output0 = image_editing(image="15653-input.png", prompt="Have there be a bottle behind the vegetables")
from mnm.tool_api import image_editing
output0 = image_editing(image='/root/input_data/15653-input.png', prompt='Place a bottle behind the vegetables.')
image_editing
image_editing
output0 = get_year_fact(year="2022")
output1 = text_generation(text=f"an engaging speech segment based on the fascinating fact about the year 2022: {output0['text']}")
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import get_year_fact, text_generation
year_fact = get_year_fact("2022")
speech_segment = text_generation(text=year_fact['text'])
get_year_fact
text_generation
text_generation
get_year_fact
text_generation
output0 = get_year_fact(year="2023")
output1 = image_generation(text=f"an intriguing image based on the fact of {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, image_segmentation
fact_about_2023 = get_year_fact("2023")
generated_image = image_generation(fact_about_2023['text'])
segmented_image = image_segmentation(generated_image['image'])
get_year_fact
image_generation
image_segmentation
get_year_fact
image_generation
image_segmentation
output0 = get_trivia_fact(number="4")
output1 = image_generation(text=f"a creative picture based on the fun fact about the number 4, which is {output0['text']}")
from mnm.tool_api import get_trivia_fact, image_generation
output0 = get_trivia_fact("4")
output1 = image_generation(output0['text'])
get_trivia_fact
image_generation
get_trivia_fact
image_generation
output0 = search_movie(movie_title="The Shape of Water", movie_year="2017")
output1 = image_generation(text=f"an intriguing image indirectly related to genre and director of {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import search_movie, image_generation, image_classification
output0 = search_movie("The Shape of Water", "2017")
output1 = image_generation("Drama, Fantasy, Romance by Guillermo del Toro")
output2 = image_classification(output1['image'])
search_movie
image_generation
image_classification
search_movie
image_generation
image_classification
output0 = love_calculator(first_name="Jacob Black", second_name="Bella Swan")
output1 = get_math_fact(number=output0['number'])
output2 = image_generation(text=f"a creative visual representation of {output1['text']}")
from mnm.tool_api import love_calculator, get_math_fact, image_generation
compatibility_result = love_calculator(first_name="Jacob", second_name="Bella")
math_fact_result = get_math_fact(number="random")
visual_representation = image_generation(text=math_fact_result["text"])
love_calculator
get_math_fact
image_generation
love_calculator
get_math_fact
image_generation
output0 = optical_character_recognition(image="COCO_train2014_000000256855.jpg")
from mnm.tool_api import optical_character_recognition
text_recognition_result = optical_character_recognition(image='/root/input_data/COCO_train2014_000000256855.jpg')
optical_character_recognition
optical_character_recognition
output0 = image_captioning(image="2323530.jpg")
output1 = image_generation(text=f"a new image based on the scene of {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import image_captioning, image_generation, optical_character_recognition
description = image_captioning(image='/root/input_data/2323530.jpg')
new_image = image_generation(text=description['text'])
text_from_new_image = optical_character_recognition(image=new_image['image'])
image_captioning
image_generation
optical_character_recognition
image_captioning
image_generation
optical_character_recognition
output0 = text_generation(text="An overall understanding of the topic - What Really Happens When You Flush on an Airplane?")
output1 = text_summarization(text=output0['text'])
output2 = wikipedia_simple_search(text=output1['text'])
from mnm.tool_api import text_generation, text_summarization, wikipedia_simple_search
output0 = text_generation("What Really Happens When You Flush on an Airplane?")
output1 = text_summarization(output0['text'])
output2 = wikipedia_simple_search("Aircraft lavatory")
text_generation
text_summarization
wikipedia_simple_search
text_generation
text_summarization
wikipedia_simple_search
output0 = image_editing(image="407550-input.png", prompt="change the carrot into broccoli")
output1 = image_captioning(image=output0['image'])
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import image_editing, image_captioning, text_summarization
edited_image_result = image_editing(image='/root/input_data/407550-input.png', prompt='Replace the carrot with broccoli.')
caption_result = image_captioning(image=edited_image_result['image'])
tagline_result = text_summarization(text=caption_result['text'])
image_editing
image_captioning
text_summarization
image_editing
image_captioning
text_summarization
output0 = automatic_speech_recognition(audio="237-134500-0000.flac")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question="who was getting divorced in the case Frank was reading about?")
from mnm.tool_api import automatic_speech_recognition, text_summarization, question_answering
audio_file_path = '/root/input_data/237-134500-0000.flac'
transcription_result = automatic_speech_recognition(audio=audio_file_path)
summarized_text = text_summarization(text=transcription_result['text'])
question_about_divorce = "Who was getting divorced in the case?"
divorce_info = question_answering(question=question_about_divorce, text=transcription_result['text'])
automatic_speech_recognition
text_summarization
question_answering
automatic_speech_recognition
text_summarization
question_answering
output0 = get_trivia_fact(number="2")
output1 = image_generation(text=f"an illustrative image of a fascinating fact about the number {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import get_trivia_fact, image_generation, image_captioning
fact_about_number_two = get_trivia_fact('2')
generated_image = image_generation(fact_about_number_two['text'])
image_description = image_captioning(generated_image['image'])
get_trivia_fact
image_generation
image_captioning
get_trivia_fact
image_generation
image_captioning
output0 = search_movie(movie_title="Nomadland", movie_year="2020")
from mnm.tool_api import search_movie
movie_details = search_movie(movie_title='Nomadland', movie_year='2020')
search_movie
search_movie
output0 = get_year_fact(year="2016")
output1 = image_generation(text=f"a visual representation of {output0['text']} suitable for a PowerPoint presentation")
from mnm.tool_api import get_year_fact, image_generation
output0 = get_year_fact("2016")
output1 = image_generation("NASA's Juno spacecraft is expected to arrive at Jupiter")
get_year_fact
image_generation
get_year_fact
image_generation
output0 = get_date_fact(date="4-May")
output1 = text_generation(text=f"a fascinating story based on {output0['text']} to impress my friends at a trivia night")
from mnm.tool_api import get_date_fact, text_generation
output0 = get_date_fact(date="05/04")
output1 = text_generation(text=f"On the 4th of May, {output0['text']}.")
get_date_fact
text_generation
get_date_fact
text_generation
output0 = object_detection(image="2328879.jpg")
output1 = count(objects=output0['objects'])
output2 = get_trivia_fact(number=output1['number'])
from PIL import Image
from mnm.tool_api import object_detection, count, get_trivia_fact
image = Image.open('/root/input_data/2328879.jpg')
detection_result = object_detection(image)
object_count = count(detection_result['objects'])
trivia_fact = get_trivia_fact(str(object_count['number']))
object_detection
count
get_trivia_fact
object_detection
count
get_trivia_fact
output0 = get_trivia_fact(number="4")
from mnm.tool_api import get_trivia_fact
output0 = get_trivia_fact("4")
get_trivia_fact
get_trivia_fact
output0 = image_captioning(image="2349496.jpg")
from mnm.tool_api import image_captioning
descriptive_text_summary = image_captioning(image_path='/root/input_data/2349496.jpg')
image_captioning
image_captioning
output0 = image_generation(text="a detailed visual of the scene: The city street is quiet during this time of night.")
output1 = image_captioning(image=output0['image'])
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import image_generation, image_captioning, text_summarization
output0 = image_generation(text="The city street is quiet during this time of night.")
output1 = image_captioning(image=output0['image'])
output2 = text_summarization(text=output1['text'])
image_generation
image_captioning
text_summarization
image_generation
image_captioning
text_summarization
output0 = get_year_fact(year="2022")
output1 = image_generation(text=f"a unique image corresponding to {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, image_segmentation
output0 = get_year_fact("2022")
output1 = image_generation(output0['text'])
output2 = image_segmentation(output1['image'])
get_year_fact
image_generation
image_segmentation
get_year_fact
image_generation
image_segmentation
output0 = get_trivia_fact(number="9")
output1 = image_generation(text=f"an illustrative image portraying the unique fact about number 9 from {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="Add a sun in the top right corner")
from mnm.tool_api import get_trivia_fact, image_generation, image_editing
output0 = get_trivia_fact('9')
output1 = image_generation(output0['text'])
output2 = image_editing(output1['image'], "Add a sun in the top right corner")
get_trivia_fact
image_generation
image_editing
get_trivia_fact
image_generation
image_editing
output0 = get_date_fact(date="24-Aug")
output1 = text_generation(text=f"a speech inspired by the historical fact: {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import get_date_fact, text_generation, text_classification
output0 = get_date_fact("08/24")
output1 = text_generation(text=output0['text'])
output2 = text_classification(text=output1['text'])
get_date_fact
text_generation
text_classification
get_date_fact
text_generation
text_classification
output0 = automatic_speech_recognition(audio="672-122797-0000.flac")
output1 = image_generation(text=f"an artist interpretation of the scene from the audio file {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import automatic_speech_recognition, image_generation, image_classification
audio_file_path = '/root/input_data/672-122797-0000.flac'
transcription = automatic_speech_recognition(audio=audio_file_path)
transcribed_text = transcription['text']
generated_image = image_generation(text=transcribed_text)
classification_result = image_classification(image=generated_image['image'])
image_type = classification_result['text']
automatic_speech_recognition
image_generation
image_classification
automatic_speech_recognition
image_generation
image_classification
output0 = image_editing(image="111376-input.png", prompt="Have the cow wear a hat.")
output1 = image_crop_left(image=output0['image'])
from mnm.tool_api import image_editing, image_crop_left
from PIL import Image
original_image_path = '/root/input_data/111376-input.png'
original_image = Image.open(original_image_path)
edited_image_info = image_editing(image=original_image, prompt="Have the cow wear a hat.")
cropped_image_info = image_crop_left(image=edited_image_info['image'])
final_image_path = '/root/playground/edited_and_cropped_image.png'
cropped_image_info['image'].save(final_image_path)
image_editing
image_crop_left
image_editing
image_crop_left
output0 = get_year_fact(year="2022")
output1 = image_generation(text=f"an image based on the interesting fact from 2022 {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import get_year_fact, image_generation, image_segmentation
fact = get_year_fact("2022")
generated_image = image_generation(fact['text'])
segmentation_result = image_segmentation(generated_image['image'])
get_year_fact
image_generation
image_segmentation
get_year_fact
image_generation
image_segmentation
output0 = get_year_fact(year="2023")
output1 = text_generation(text=f"a short story based on the interesting fact from {output0['text']}")
from mnm.tool_api import get_year_fact, text_generation
year_fact = get_year_fact("2023")
prompt_for_story = "Tell a short story about the completion of the Istanbul Canal in 2023 and its impact on the city."
short_story = text_generation(text=prompt_for_story)
get_year_fact
text_generation
get_year_fact
text_generation
output0 = get_weather(lon="18.41", lat="34.02")
from mnm.tool_api import get_weather
output0 = get_weather(lon="18.41", lat="34.02")
get_weather
get_weather
output0 = love_calculator(first_name="Christian Grey", second_name="Anastasia Steele")
from mnm.tool_api import love_calculator
output0 = love_calculator("Christian Grey", "Anastasia Steele")
love_calculator
love_calculator
output0 = image_generation(text="A long passenger train traveling over a rail bridge.")
output1 = image_editing(image=output0['image'], prompt="it's snowing heavily and the train and the bridge are almost covered in snow")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_generation, image_editing, image_captioning
output0 = image_generation(text='A long passenger train traveling over a rail bridge.')
output1 = image_editing(image=output0['image'], prompt='It is snowing heavily and the train and the bridge are almost covered in snow.')
output2 = image_captioning(image=output1['image'])
image_generation
image_editing
image_captioning
image_generation
image_editing
image_captioning
output0 = image_generation(text="An illustration displaying some big red metal boxes located near a road")
output1 = image_crop_right(image=output0['image'])
from mnm.tool_api import image_generation, image_crop_right
output0 = image_generation(text='some big red metal boxes located near a road')
output1 = image_crop_right(image=output0['image'])
image_generation
image_crop_right
image_generation
image_crop_right
output0 = love_calculator(first_name="Jack Dawson", second_name="Rose Dewitt Bukater")
output1 = get_trivia_fact(number=output0['number'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import love_calculator, get_trivia_fact, text_classification
compatibility_result = love_calculator("Jack Dawson", "Rose Dewitt Bukater")
trivia_fact_result = get_trivia_fact("random")
sentiment_result = text_classification(trivia_fact_result["text"])
love_calculator
get_trivia_fact
text_classification
love_calculator
get_trivia_fact
text_classification
output0 = image_editing(image="581668-input.png", prompt="Add cookies to the tray.")
output1 = image_captioning(image=output0['image'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import image_editing, image_captioning, text_classification
edited_image_result = image_editing(image="/root/input_data/581668-input.png", prompt="Add a few cookies to the tray in the picture.")
caption_result = image_captioning(image=edited_image_result['image'])
sentiment_result = text_classification(text=caption_result['text'])
image_editing
image_captioning
text_classification
image_editing
image_captioning
text_classification
output0 = image_editing(image="111376-input.png", prompt="Have the cow wear a hat.")
output1 = image_captioning(image=output0['image'])
from mnm.tool_api import image_editing, image_captioning
edited_image_result = image_editing(image='/root/input_data/111376-input.png', prompt='A cow wearing a hat')
description_result = image_captioning(image=edited_image_result['image'])
image_editing
image_captioning
image_editing
image_captioning
output0 = text_generation(text="Tell me about What does woke mean")
output1 = text_generation(text=f"a paragraph illustrating the usage of {output0['text']}")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import text_generation, text_classification
paragraph = text_generation(text="woke")
sentiment = text_classification(text=paragraph['text'])
text_generation
text_generation
text_classification
text_generation
text_classification
output0 = visual_question_answering(image="2348288.jpg", question="What color do you think that tail is?")
output1 = text_generation(text=f"a creative and fun story that links the color of the tail, which is {output0['text']}, with the scene of two dogs playing together on the grass")
output2 = text_classification(text=output1['text'])
from mnm.tool_api import object_detection, visual_question_answering, text_generation, text_classification
output0 = object_detection(image='/root/input_data/2348288.jpg')
output1 = visual_question_answering(image='/root/input_data/2348288.jpg', question="What color is the dog's tail?")
output2 = text_generation(text="In a scene filled with the hustle and bustle of a vibrant city park, a dog with a black tail stands out. Its tail wags energetically as it navigates through the crowd, leaving a trail of smiles in its wake. The dog's tail is black, and it seems to have a story of its own. Let's dive into the tale of this black-tailed adventurer.")
output3 = text_classification(text=output2['text'])
visual_question_answering
text_generation
text_classification
object_detection
visual_question_answering
text_generation
text_classification
output0 = get_math_fact(number="5")
output1 = image_generation(text=f"an image representing fun facts about the number 5 based on {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import get_math_fact, get_trivia_fact, image_generation, image_classification
math_fact = get_math_fact('5')
trivia_fact = get_trivia_fact('5')
description = f"{math_fact['text']}. {trivia_fact['text']}."
generated_image = image_generation(description)
classification_result = image_classification(generated_image['image'])
get_math_fact
image_generation
image_classification
get_math_fact
get_trivia_fact
image_generation
image_classification
output0 = image_generation(text="a situation where a cyclist pauses near the water to observe a canoe.")
output1 = image_editing(image=output0['image'], prompt="evening with a beautiful sunset in the background")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_generation, image_editing
output0 = image_generation(text="A cyclist pauses near the water to observe a canoe.")
output1 = image_editing(image=output0['image'], prompt="Change the time to evening with a beautiful sunset in the background.")
image_generation
image_editing
image_captioning
image_generation
image_editing
output0 = search_movie(movie_title="Green Book", movie_year="2018")
output1 = text_summarization(text=output0['text'])
output2 = image_generation(text=f" a visual representation that helps grasp the overall ambiance and mood of {output1['text']}")
from mnm.tool_api import search_movie, image_generation
movie_summary = search_movie(movie_title="Green Book", movie_year="2018")
visual_representation = image_generation(text=movie_summary['text'])
search_movie
text_summarization
image_generation
search_movie
image_generation
output0 = image_generation(text="an animation based on the description \"A trolley with people drives down a street by a building.\"")
output1 = optical_character_recognition(image=output0['image'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import image_generation, optical_character_recognition, text_classification
generated_image = image_generation(text='A trolley with people drives down a street by a building')
ocr_results = optical_character_recognition(image=generated_image['image'])
detected_texts = ocr_results['text'].split(', ')
text_vibes = [text_classification(text=text) for text in detected_texts]
image_generation
optical_character_recognition
text_classification
image_generation
optical_character_recognition
output0 = question_answering(text="Religious and spiritual teachers, such as gurus, mullahs, rabbis, pastors/youth pastors and lamas, may teach religious texts such as the Quran, Torah or Bible.", question="What is another general name for a religious teacher?")
output1 = wikipedia_simple_search(text=output0['text'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import wikipedia_simple_search, text_classification
general_term_search_result = wikipedia_simple_search("religious teacher")
wikipedia_page_result = wikipedia_simple_search("religious honorifics")
sentiment_result = text_classification(wikipedia_page_result['text'])
question_answering
wikipedia_simple_search
text_classification
wikipedia_simple_search
wikipedia_simple_search
text_classification
output0 = object_detection(image="2409782.jpg")
output1 = select_object(objects=output0['objects'], object_name="lamp")
output2 = image_crop(image="2409782.jpg", object=output1['object'])
from mnm.tool_api import object_detection, image_crop
from PIL import Image
image_path = '/root/input_data/2409782.jpg'
image = Image.open(image_path)
detection_result = object_detection(image)
closest_bbox = [123.38, 253.69, 262.66, 310.34] # This was determined by finding the object closest to the center of the image
object_to_crop = {'bbox': closest_bbox}
cropped_image_info = image_crop(image, object_to_crop)
cropped_image = cropped_image_info['image']
cropped_image_path = '/root/playground/cropped_closest_center.jpg'
cropped_image.save(cropped_image_path)
object_detection
select_object
image_crop
object_detection
image_crop
output0 = get_math_fact(number="6")
output1 = text_generation(text=f"a short story to make math learning fun based on {output0['text']}")
output2 = question_answering(text=output1['text'], question="What is the essential mathematical characteristic of the number 6 discussed in the story?")
from mnm.tool_api import get_math_fact, text_generation
math_fact_about_6 = get_math_fact('6')
story_prompt = f"Once upon a time, the number 6 discovered that it was {math_fact_about_6['text']}."
short_story = text_generation(story_prompt)
comprehension_question = "What is the essential mathematical characteristic of the number 6 discussed in the story?"
get_math_fact
text_generation
question_answering
get_math_fact
text_generation
output0 = search_movie(movie_title="Green Book", movie_year="2018")
output1 = text_generation(text=f"Summarize the text: {output0['text']}")
output2 = question_answering(text=output1['text'], question="who the director of 'Green Book' is?")
from mnm.tool_api import search_movie, text_generation
movie_info = search_movie(movie_title='Green Book', movie_year='2018')
summary_prompt = f"Summarize the following movie information in under a minute read: {movie_info['text']}"
movie_summary = text_generation(text=summary_prompt)
director = movie_info['text'].split("Director: ")[1].split("\n")[0]
search_movie
text_generation
question_answering
search_movie
text_generation
output0 = image_generation(text="an image that portrays \"Two cups of fruits placed next to each other.\"")
output1 = image_editing(image=output0['image'], prompt="both cups filled with only strawberries")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import image_editing, image_classification
edited_image_result = image_editing(image_path='/root/input_data/example.png', prompt='Both cups filled with only strawberries.')
classification_result = image_classification(image=edited_image_result['image'])
image_generation
image_editing
image_classification
image_editing
image_classification
output0 = wikipedia_simple_search(text="What is a recession")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question="explain how a recession impacts the average individual?")
from mnm.tool_api import wikipedia_simple_search, text_summarization
output0 = wikipedia_simple_search("recession")
output1 = text_summarization(output0['text'])
output2 = wikipedia_simple_search("impact of recession on individuals")
output3 = text_summarization(output2['text'])
output4 = wikipedia_simple_search("How does a recession impact the average individual?")
output5 = wikipedia_simple_search("Effects of a recession on individuals")
wikipedia_simple_search
text_summarization
question_answering
wikipedia_simple_search
text_summarization
wikipedia_simple_search
text_summarization
wikipedia_simple_search
wikipedia_simple_search
output0 = search_movie(movie_title="The Shape of Water", movie_year="2017")
output1 = image_generation(text=f"an image that represents a subtle hint based on the genre and director of {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import image_generation, image_classification
output0 = image_generation(text="A fantasy movie set in the 1960s during the Cold War with an amphibious creature")
output1 = image_classification(image=output0['image'])
search_movie
image_generation
image_classification
image_generation
image_classification
output0 = text_generation(text="a step-by-step guide or tips for baking under the conditions of \"Bake 375 for 10 minutes then turn down to 350 degrees for 45 minutes.\"")
output1 = question_answering(text=output0['text'], question="What is the total baking time?")
output2 = image_generation(text=f"a visual representation of the procedure based on {output0['text']}")
from mnm.tool_api import text_generation, image_generation
guide = text_generation("Bake 375 for 10 minutes then turn down to 350 degrees for 45 minutes.")
total_baking_time = 10 + 45 # Total baking time in minutes
visual_representation = image_generation(guide['text'])
text_generation
question_answering
image_generation
text_generation
image_generation
output0 = automatic_speech_recognition(audio="6829-68769-0002.flac")
output1 = text_generation(text=f"extrapolation on what the lawyer's conversation could have been about before or after the line: {output0['text']}")
output2 = question_answering(text=output1['text'], question="what could be the possible reasons or situations that caused the old lawyer to say \"I can't see it in that light\"?")
from mnm.tool_api import automatic_speech_recognition, text_generation
transcription = automatic_speech_recognition(audio="/root/input_data/6829-68769-0002.flac")
prompt_before = "What led to the conversation where the old lawyer said: " + transcription['text']
output0 = text_generation(prompt_before)
prompt_after = transcription['text'] + " What did the old lawyer say next?"
output1 = text_generation(prompt_after)
prompt_reasons = "Why would an old lawyer say: " + transcription['text']
output2 = text_generation(prompt_reasons)
automatic_speech_recognition
text_generation
question_answering
automatic_speech_recognition
text_generation
text_generation
text_generation
output0 = search_movie(movie_title="Moonlight", movie_year="2016")
output1 = question_answering(text=output0['text'], question="what genre it falls into?")
output2 = text_generation(text=f"a general overview of what kinds of themes I might encounter in films of this type based on {output1['text']}")
from mnm.tool_api import search_movie, text_generation
movie_details = search_movie("Moonlight", "2016")
genre = movie_details.get('text', '').split("Genre: ")[1].split("\n")[0]
themes_overview = text_generation(f"Common themes in {genre} films include:")
search_movie
question_answering
text_generation
search_movie
text_generation
output0 = search_movie(movie_title="Parasite", movie_year="2019")
output1 = question_answering(text=output0['text'], question=" who the director is ?")
output2 = wikipedia_simple_search(text=output1['text'])
from mnm.tool_api import search_movie, wikipedia_simple_search
output0 = search_movie(movie_title="Parasite", movie_year="2019")
output1 = wikipedia_simple_search(text="Parasite 2019 film")
search_movie
question_answering
wikipedia_simple_search
search_movie
wikipedia_simple_search
output0 = love_calculator(first_name="Romeo Montague", second_name="Juliet Capulet")
output1 = get_math_fact(number=output0['number'])
output2 = image_generation(text=f"a visual representation of {output1['text']}")
from mnm.tool_api import love_calculator, image_generation
output0 = love_calculator("Romeo Montague", "Juliet Capulet")
output1 = image_generation("Mathematics: The language of the universe.")
love_calculator
get_math_fact
image_generation
love_calculator
image_generation
output0 = get_year_fact(year="2018")
output1 = text_generation(text=f"a descriptive narrative based on the intriguing fact from 2018: {output0['text']}")
output2 = image_generation(text=f"a suitable image that depicts the narrative of {output1['text']}")
from mnm.tool_api import get_year_fact, image_generation
year_fact = get_year_fact("2018")
description = "A representation of the year 2018, showcasing themes of technology, advancement, and global connectivity."
generated_image = image_generation(description)
get_year_fact
text_generation
image_generation
get_year_fact
image_generation
output0 = image_editing(image="270650-input.png", prompt="put skis on the wheel")
output1 = image_captioning(image=output0['image'])
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import image_editing, text_generation
output0 = image_editing(image='/root/input_data/270650-input.png', prompt="Add skis to the wheels of the car.")
output1 = text_generation(text="Caption for a car with skis instead of wheels.")
image_editing
image_captioning
text_generation
image_editing
text_generation
output0 = image_editing(image="398575-input.png", prompt="let the stop light be a spear")
output1 = object_detection(image=output0['image'])
output2 = tag(image=output1['image'], objects=output1['objects'])
from mnm.tool_api import image_editing, object_detection
edited_image_result = image_editing(image='/root/input_data/398575-input.png', prompt="Transform the stoplight into a spear.")
detection_result = object_detection(image=edited_image_result['image'])
image_editing
object_detection
tag
image_editing
object_detection
output0 = search_movie(movie_title="CODA", movie_year="2021")
output1 = text_generation(text=f"a brief summary or abstract of the movie CODA based on {output0['text']}")
output2 = question_answering(text=output1['text'], question="Who is the director of CODA?")
from mnm.tool_api import search_movie, text_generation
movie_info = search_movie(movie_title='CODA', movie_year='2021')
movie_summary = text_generation(text=movie_info['text'])
search_movie
text_generation
question_answering
search_movie
text_generation
output0 = get_math_fact(number="10")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question="What are some interesting and simple properties of the number 10?")
from mnm.tool_api import get_math_fact, text_summarization
output0 = get_math_fact(number="10")
output1 = text_summarization(text=output0['text'])
get_math_fact
text_summarization
question_answering
get_math_fact
text_summarization
output0 = image_segmentation(image="2347302.jpg")
output1 = select_object(objects=output0['objects'], object_name="city street")
output2 = color_pop(image="2347302.jpg", object=output1['object'])
from PIL import Image
import io
from mnm.tool_api import image_segmentation, color_pop
# Load the image and convert it to a PIL image
image_path = '/root/input_data/2347302.jpg'
with open(image_path, 'rb') as file:
image_data = file.read()
image = Image.open(io.BytesIO(image_data))
# Segment the image
segmentation_result = image_segmentation(image)
# Apply the color pop effect to the image, keeping the street in color and the rest in grayscale
# Assuming the street segment is the first one in the list (for demonstration purposes)
street_segment = segmentation_result['objects'][0]
color_pop_result = color_pop(image, street_segment)
# Save the resulting image
output_image_path = '/root/playground/urban_planning_presentation.jpg'
color_pop_result['image'].save(output_image_path)
image_segmentation
select_object
color_pop
image_segmentation
color_pop
output0 = wikipedia_simple_search(text="What is Juneteenth")
output1 = text_summarization(text=output0['text'])
output2 = image_generation(text=f"a visual depiction of {output1['text']}")
from mnm.tool_api import text_summarization, image_generation
summary_result = text_summarization("Juneteenth is a holiday celebrating the emancipation of those who had been enslaved in the United States. It is also known as Freedom Day or Emancipation Day. Please provide a summary of this event.")
summary_text = summary_result.get('text', 'No summary available')
visual_result = image_generation("A visual representation of Juneteenth, the holiday celebrating the emancipation of those who had been enslaved in the United States.")
visual_image = visual_result.get('image', None)
wikipedia_simple_search
text_summarization
image_generation
text_summarization
image_generation
output0 = image_generation(text="an image depicting an athlete in the air during a snow ski long jump")
output1 = image_crop_top(image=output0['image'])
output2 = object_detection(image=output1['image'])
from mnm.tool_api import image_generation, object_detection
generated_image_result = image_generation(text="an athlete in the air during a snow ski long jump")
detected_objects_result = object_detection(image=generated_image_result['image'])
image_generation
image_crop_top
object_detection
image_generation
object_detection
output0 = image_captioning(image="2395343.jpg")
output1 = text_summarization(text=output0['text'])
output2 = image_generation(text=f"an image created from the narrative of {output1['text']} that fits the graphic novel’s art style and mood")
from mnm.tool_api import image_captioning, image_generation
from PIL import Image
image_path = '/root/input_data/2395343.jpg'
image = Image.open(image_path)
caption_result = image_captioning(image)
generated_image_result = image_generation(caption_result['text'])
output_image_path = '/root/playground/generated_image.jpg'
with open(output_image_path, 'wb') as file:
file.write(generated_image_result['image'].tobytes())
image_captioning
text_summarization
image_generation
image_captioning
image_generation
output0 = visual_question_answering(image="2336917.jpg", question="What place could this be?")
output1 = image_generation(text=f"a new panoramic image view of {output0['text']}")
output2 = image_crop_bottom(image=output1['image'])
from PIL import Image
from mnm.tool_api import image_classification, image_captioning, visual_question_answering, image_generation, image_crop_bottom
import io
# Load the image as a PIL Image object
image_path = '/root/input_data/2336917.jpg'
with open(image_path, 'rb') as file:
image_data = file.read()
image = Image.open(io.BytesIO(image_data))
# Classify the image to identify the place
classification_result = image_classification(image)
# Generate a descriptive caption of the image
caption_result = image_captioning(image)
# Ask a question about the location depicted in the image
location_question = "What is the name of this place?"
location_answer = visual_question_answering(image, location_question)
# Generate a panoramic image view of a generic train station
panoramic_description = "panoramic view of a train station"
panoramic_image_result = image_generation(panoramic_description)
panoramic_image = panoramic_image_result['image']
# Crop the lower half of the panoramic image
cropped_image_result = image_crop_bottom(panoramic_image)
cropped_image = cropped_image_result['image']
visual_question_answering
image_generation
image_crop_bottom
image_classification
image_captioning
visual_question_answering
image_generation
image_crop_bottom
output0 = image_editing(image="357903-input.png", prompt="Remove one of the pizzas.")
output1 = image_crop_top(image=output0['image'])
output2 = image_classification(image=output1['image'])
from mnm.tool_api import image_segmentation, image_editing, image_crop_top, image_classification
segmentation_result = image_segmentation('/root/input_data/357903-input.png')
pizza_to_remove = next((obj for obj in segmentation_result['objects'] if obj['label'] == 'pizza'), None)
prompt = f"Remove the object at {pizza_to_remove['bbox']} from the image."
edited_image_result = image_editing('/root/input_data/357903-input.png', prompt)
cropped_image_result = image_crop_top(edited_image_result['image'])
classification_result = image_classification(cropped_image_result['image'])
food_type = classification_result['text']
image_editing
image_crop_top
image_classification
image_segmentation
next
image_editing
image_crop_top
image_classification
output0 = search_movie(movie_title="12 Years a Slave", movie_year="2013")
output1 = text_summarization(text=output0['text'])
from mnm.tool_api import search_movie
movie_info = search_movie("12 Years a Slave", "2013")
search_movie
text_summarization
search_movie
output0 = get_year_fact(year="2022")
output1 = image_generation(text=f"an illustration that brings to life {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import wikipedia_simple_search, image_generation, image_captioning
trivia_result = wikipedia_simple_search("2022")
illustration_result = image_generation(trivia_result['text'])
description_result = image_captioning(illustration_result['image'])
get_year_fact
image_generation
image_captioning
wikipedia_simple_search
image_generation
image_captioning
output0 = image_generation(text="a visualization of two brown and black birds staring on a curved wire")
output1 = object_detection(image=output0['image'])
output2 = select_object(objects=output1['objects'], object_name="bird")
from mnm.tool_api import image_generation, object_detection, tag
output0 = image_generation(text='two brown and black birds staring on a curved wire')
output1 = object_detection(image=output0['image'])
output2 = tag(image=output0['image'], objects=output1['objects'])
image_generation
object_detection
select_object
image_generation
object_detection
tag
output0 = get_year_fact(year="2023")
output1 = text_generation(text=f"a novel summary based on the interesting fact about year 2023, which is {output0['text']}")
output2 = question_answering(text=output1['text'], question="What was the main conflict in the novel inspired by the year 2023 fact?")
from mnm.tool_api import get_year_fact, text_summarization, question_answering
output0 = get_year_fact("2023")
output1 = text_summarization(f"The Istanbul Canal is an ambitious project that is expected to be completed in the year 2023. {output0['text']}.")
output2 = question_answering("What was the main conflict in the novel inspired by the year 2023 fact?", output1['text'])
get_year_fact
text_generation
question_answering
get_year_fact
text_summarization
question_answering
output0 = visual_question_answering(image="2326465.jpg", question="What is the name of the fruit on the table?")
output1 = image_generation(text=f"a new image representing the specific fruit identified as {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import object_detection, image_generation, optical_character_recognition
output0 = object_detection(image='/root/input_data/2326465.jpg')
output1 = image_generation(text=output0['objects'][0]['label'])
output2 = optical_character_recognition(image='/root/playground/generated_fruit_image.jpg')
visual_question_answering
image_generation
optical_character_recognition
object_detection
image_generation
optical_character_recognition
output0 = image_editing(image="362944-input.png", prompt="The car should be white.")
output1 = image_segmentation(image=output0['image'])
output2 = count(objects=output1['objects'])
from mnm.tool_api import image_editing, image_segmentation
edited_image_result = image_editing(image='/root/input_data/362944-input.png', prompt="Recolor the car to white.")
segmentation_result = image_segmentation(image=edited_image_result['image'])
number_of_parts = len(segmentation_result['objects'])
image_editing
image_segmentation
count
image_editing
image_segmentation
len
output0 = image_generation(text="an image of big red metal boxes by a road dedicated for a kids story")
output1 = image_crop_right(image=output0['image'])
from mnm.tool_api import image_generation
generated_image = image_generation(text="big red metal boxes by a road")
image_generation
image_crop_right
image_generation
output0 = image_generation(text="a painting depicting a woman sitting on a wood bench")
output1 = object_detection(image=output0['image'])
output2 = select_object(objects=output1['objects'], object_name="wood bench")
from mnm.tool_api import image_generation, object_detection, tag
output0 = image_generation(text='this woman is sitting on a wood bench')
output1 = object_detection(image=output0['image'])
wood_bench = next((obj for obj in output1['objects'] if 'bench' in obj['label'].lower()), None)
output2 = tag(image=output0['image'], objects=[wood_bench]) if wood_bench else None
image_generation
object_detection
select_object
image_generation
object_detection
next
output0 = visual_question_answering(image="2317766.jpg", question="What color is the pen?")
output1 = text_generation(text=f"'In the realm of knowledge, a {output0['text']} pen on a yellow book indicated... and an intriguing continuation of this story")
output2 = text_summarization(text=output1['text'])
from mnm.tool_api import object_detection, text_generation, text_summarization
image_path = '/root/input_data/2317766.jpg'
detected_objects = object_detection(image_path)
story_prompt = "In the realm of knowledge, a black pen on a yellow book indicated..."
generated_story = text_generation(story_prompt)
full_story = story_prompt + " " + generated_story['text']
story_summary = text_summarization(full_story)
visual_question_answering
text_generation
text_summarization
object_detection
text_generation
text_summarization
output0 = get_trivia_fact(number="4")
output1 = text_summarization(text=output0['text'])
from mnm.tool_api import get_trivia_fact
fact_about_number_4 = get_trivia_fact('4')
simple_fact = "The number 4 is special because it's the same as the number of parts in the heart of animals like humans and cats!"
get_trivia_fact
text_summarization
get_trivia_fact
output0 = image_segmentation(image="2394635.jpg")
output1 = select_object(objects=output0['objects'], object_name="ramp")
output2 = color_pop(image="2394635.jpg", object=output1['object'])
from mnm.tool_api import image_segmentation, color_pop
from PIL import Image
image_path = '/root/input_data/2394635.jpg'
image = Image.open(image_path)
segmentation_result = image_segmentation(image)
ramp = next((obj for obj in segmentation_result['objects'] if obj['label'] == 'platform'), None)
result = color_pop(image, ramp)
result_image = result['image']
result_image_path = '/root/playground/modified_2394635.jpg'
result_image.save(result_image_path)
image_segmentation
select_object
color_pop
image_segmentation
next
color_pop
output0 = get_math_fact(number="10")
output1 = image_generation(text=f"a fun image depicting the unique fact about the number 10 from {output0['text']}")
output2 = image_editing(image=output1['image'], prompt="edit the image and include a group of ten animals in the foreground to personify the quantity 10 in nature")
from mnm.tool_api import get_trivia_fact, image_generation, image_editing
fact_about_number_10 = get_trivia_fact(number='10')
initial_image = image_generation(text=fact_about_number_10['text'])
edited_image_with_animals = image_editing(image=initial_image['image'], prompt="Include a group of ten animals in the foreground to personify the quantity 10 in nature.")
get_math_fact
image_generation
image_editing
get_trivia_fact
image_generation
image_editing
output0 = get_math_fact(number="3")
output1 = text_generation(text=f"a caption or story that includes the interesting fact: {output0['text']}")
output2 = image_generation(text=f"an illustration that encapsulates the story or caption: {output1['text']}")
from mnm.tool_api import get_trivia_fact, text_generation, image_generation
output0 = get_trivia_fact('3')
output1 = text_generation(f"Tell a story that includes the following fact: {output0['text']}.")
output2 = image_generation(output1['text'])
get_math_fact
text_generation
image_generation
get_trivia_fact
text_generation
image_generation
output0 = visual_question_answering(image="2350057.jpg", question="What is on the fence?")
output1 = text_generation(text=f"a fictional backstory that includes the object {output0['text']} from the fence in the image")
output2 = text_summarization(text=output1['text'])
from PIL import Image
from mnm.tool_api import object_detection, text_generation, text_summarization
image = Image.open('/root/input_data/2350057.jpg')
detected_objects = object_detection(image)
prompt = "Once upon a time, an elephant was sitting on a fence"
backstory = text_generation(prompt)
summary = text_summarization(backstory['text'])
visual_question_answering
text_generation
text_summarization
object_detection
text_generation
text_summarization
output0 = image_classification(image="23936.jpg")
output1 = text_generation(text=f"a detailed story about the object identified as {output0['text']}")
output2 = text_summarization(text=output1['text'])
from PIL import Image
from mnm.tool_api import image_classification, image_captioning, text_summarization
image_path = '/root/input_data/23936.jpg'
image = Image.open(image_path)
classification_result = image_classification(image)
story_result = image_captioning(image)
summary_result = text_summarization(story_result['text'])
image_classification
text_generation
text_summarization
image_classification
image_captioning
text_summarization
output0 = get_year_fact(year="2013")
output1 = text_summarization(text=output0['text'])
from mnm.tool_api import get_year_fact
year_fact = get_year_fact("2013")
get_year_fact
text_summarization
get_year_fact
output0 = image_editing(image="270650-input.png", prompt="put skis on the wheel")
output1 = image_segmentation(image=output0['image'])
output2 = count(objects=output1['objects'])
from mnm.tool_api import image_editing, image_segmentation
edited_image_result = image_editing(image='/root/input_data/270650-input.png', prompt="Add skis on the wheels in the photo.")
segmentation_result = image_segmentation(image=edited_image_result['image'])
number_of_parts = len(segmentation_result['objects'])
image_editing
image_segmentation
count
image_editing
image_segmentation
len
output0 = visual_question_answering(image="2326465.jpg", question="What is the name of the fruit on the table?")
output1 = image_generation(text=f"a supermarket aisle dedicated to the {output0['text']}")
output2 = optical_character_recognition(image=output1['image'])
from mnm.tool_api import object_detection, image_generation, optical_character_recognition
output0 = object_detection(image='/root/input_data/2326465.jpg')
output1 = image_generation(text="a supermarket aisle dedicated to apples")
output2 = optical_character_recognition(image=output1['image'])
visual_question_answering
image_generation
optical_character_recognition
object_detection
image_generation
optical_character_recognition
output0 = image_segmentation(image="2347302.jpg")
output1 = select_object(objects=output0['objects'], object_name="bus")
output2 = color_pop(image="2347302.jpg", object=output1['object'])
from mnm.tool_api import image_segmentation, color_pop
from PIL import Image
image_path = '/root/input_data/2347302.jpg'
image = Image.open(image_path)
segmentation_result = image_segmentation(image)
bus_object = next((obj for obj in segmentation_result['objects'] if obj['label'] == 'bus'), None)
color_pop_result = color_pop(image, bus_object)
image_segmentation
select_object
color_pop
image_segmentation
next
color_pop
output0 = image_generation(text="a visual representation of two men standing in some sand walking up a hill")
output1 = image_captioning(image=output0['image'])
output2 = question_answering(text=output1['text'], question="Do the men in the image appear to be equipped for their hike?")
from mnm.tool_api import image_generation, image_captioning, visual_question_answering
description = "two men standing in some sand walking up a hill"
generated_image = image_generation(text=description)
image_description = image_captioning(image=generated_image['image'])
question_about_preparedness = "Do the men in the image appear to be equipped for their hike?"
preparedness_answer = visual_question_answering(image=generated_image['image'], question=question_about_preparedness)
image_generation
image_captioning
question_answering
image_generation
image_captioning
visual_question_answering
output0 = search_movie(movie_title="Moonlight", movie_year="2016")
output1 = image_generation(text=f"an image that captures the essence of the movie Moonlight using details from {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from mnm.tool_api import search_movie, image_generation, object_detection
output0 = search_movie("Moonlight", "2016")
output1 = image_generation(output0['text'])
output2 = object_detection(output1['image'])
search_movie
image_generation
image_segmentation
search_movie
image_generation
object_detection
output0 = get_year_fact(year="2018")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question=" who was the significant person involved in that event ?")
from mnm.tool_api import get_year_fact, text_summarization, wikipedia_simple_search
output0 = get_year_fact("2018")
output1 = text_summarization(output0['text'])
output2 = wikipedia_simple_search("2018")
get_year_fact
text_summarization
question_answering
get_year_fact
text_summarization
wikipedia_simple_search
output0 = object_detection(image="09966.jpg")
output1 = image_generation(text=f"new visuals focusing on the identified subject - {output0['text']}")
output2 = image_segmentation(image=output1['image'])
from PIL import Image
from mnm.tool_api import object_detection, image_editing, image_segmentation
image_path = '/root/input_data/09966.jpg'
image = Image.open(image_path)
detection_result = object_detection(image)
prompt = "Focus on the cake"
edited_image_result = image_editing(detection_result['image'], prompt)
segmentation_result = image_segmentation(edited_image_result['image'])
object_detection
image_generation
image_segmentation
object_detection
image_editing
image_segmentation
output0 = image_classification(image="13181.jpg")
output1 = image_generation(text=f"a refreshing depiction of {output0['text']}")
output2 = image_captioning(image=output1['image'])
from mnm.tool_api import image_classification, image_generation, text_generation
output0 = image_classification(image='/root/input_data/13181.jpg')
output1 = image_generation(text=output0['text'])
output2 = text_generation(text="Describe this image: " + output0['text'])
image_classification
image_generation
image_captioning
image_classification
image_generation
text_generation
output0 = image_segmentation(image="2356182.jpg")
output1 = count(objects=output0['objects'])
output2 = get_trivia_fact(number=output1['number'])
from mnm.tool_api import object_detection, count, get_trivia_fact
output0 = object_detection('/root/input_data/2356182.jpg')
output1 = count(output0['objects'])
output2 = get_trivia_fact(str(output1['number']))
image_segmentation
count
get_trivia_fact
object_detection
count
get_trivia_fact
output0 = wikipedia_simple_search(text="What is pansexual")
output1 = text_generation(text=f"Summarize the text: {output0['text']}")
output2 = text_generation(text=f"Expand upon the short and concise explanation of pansexual derived from {output1['text']} and provide more nuanced detail and possibly expand upon related concepts")
from mnm.tool_api import wikipedia_simple_search
summary_result = wikipedia_simple_search("Pansexual")
detailed_result_gender_identity = wikipedia_simple_search("Gender identity")
detailed_result_sexual_orientation = wikipedia_simple_search("Sexual orientation")
wikipedia_simple_search
text_generation
text_generation
wikipedia_simple_search
wikipedia_simple_search
wikipedia_simple_search
output0 = image_classification(image="17412.jpg")
output1 = text_generation(text=f"an informative paragraph about the {output0['text']}")
output2 = question_answering(text=output1['text'], question="What is the average lifespan of those animals in the wild?")
from PIL import Image
from mnm.tool_api import image_classification, text_generation, wikipedia_simple_search
# Load the image using PIL and classify it to identify the animal
image_path = '/root/input_data/17412.jpg'
image = Image.open(image_path)
classification_result = image_classification(image)
animal = classification_result['text']
# Generate an informative paragraph about the identified animal
animal_info = text_generation("pug dog")
informative_paragraph = animal_info['text']
# Retrieve information about the average lifespan of pugs in the wild
lifespan_info = wikipedia_simple_search("pug dog lifespan")
lifespan_paragraph = lifespan_info['text']
image_classification
text_generation
question_answering
image_classification
text_generation
wikipedia_simple_search
output0 = get_math_fact(number="5")
output1 = text_generation(text=f"thread together a cohesive, yet interesting storyline about {output0['text']}")
output2 = text_generation(text=f"Summarize the text: {output1['text']}")
from mnm.tool_api import get_math_fact, get_trivia_fact
math_fact_about_5 = get_math_fact("5")
trivia_fact_about_5 = get_trivia_fact("5")
storyline = "The number 5 is not just a digit but a symbol of significance across various domains. In the realm of geometry, it represents the number of Platonic solids, which are the only convex polyhedra with equivalent faces composed of congruent convex regular polygons. These solids have been known since antiquity and were famously associated with the elements of the universe by the philosopher Plato. Moreover, the number 5 holds a special place in the world of sports and unity. It is symbolized by the five interlocked rings of the Olympic Games, which represent the number of inhabited continents brought together by the spirit of competition and camaraderie. These continents are often counted as North America and South America as one, along with Africa, Asia, Europe, and Oceania. The number 5, therefore, carries with it a legacy of mathematical perfection and global unity."
summary = "Number 5: - Symbolizes the number of Platonic solids in geometry, representing the elements of the universe. - Embodied in the Olympic Games' symbol, uniting the inhabited continents through sports."
get_math_fact
text_generation
text_generation
get_math_fact
get_trivia_fact
output0 = search_movie(movie_title="12 Years a Slave", movie_year="2013")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question="Who was the director of the movie?")
from mnm.tool_api import search_movie
movie_info = search_movie(movie_title='12 Years a Slave', movie_year='2013')
director = movie_info.get('text', '').split('Director: ')[1].split('\n')[0]
search_movie
text_summarization
question_answering
search_movie
output0 = wikipedia_simple_search(text="What is a recession")
output1 = text_generation(text=f"an expansion on the topic of recession, including its impact on common individuals, based on {output0['text']}")
output2 = question_answering(text=output1['text'], question="What are the possible effects of a recession on the unemployment rate?")
from mnm.tool_api import wikipedia_simple_search
recession_info = wikipedia_simple_search("Recession")
# The variable recession_info contains the summary from Wikipedia about recession.
# Additional explanations about the impact on individuals and unemployment were provided in plain text.
wikipedia_simple_search
text_generation
question_answering
wikipedia_simple_search
output0 = wikipedia_simple_search(text="What is a recession")
output1 = text_generation(text=f"a comprehensive understanding based on {output0['text']} and expansion on the major effects recessions have had on global economies in recent history")
output2 = image_generation(text=f"a visual representation of a recession's impact on the stock market based on {output1['text']}")
from mnm.tool_api import wikipedia_simple_search
output0 = wikipedia_simple_search("Recession")
output1 = wikipedia_simple_search("Effects of recessions on global economies")
wikipedia_simple_search
text_generation
image_generation
wikipedia_simple_search
wikipedia_simple_search
output0 = object_detection(image="2387080.jpg")
output1 = select_object(objects=output0['objects'], object_name="computer desk")
from PIL import Image
from mnm.tool_api import object_detection
image = Image.open('/root/input_data/2387080.jpg')
detection_result = object_detection(image)
computer_desk = next((obj for obj in detection_result['objects'] if obj['label'].lower() == 'computer desk'), None)
alternative_labels = ['desk', 'table']
computer_desk = next((obj for obj in detection_result['objects'] if obj['label'].lower() in alternative_labels), None)
object_detection
select_object
object_detection
next
next
output0 = get_math_fact(number="5")
output1 = text_generation(text=f"a short story illustrating {output0['text']}")
from mnm.tool_api import get_trivia_fact, text_generation
output0 = get_trivia_fact("5")
output1 = text_generation("In a world where the number of Justices on the Supreme Court of the United States necessary to render a majority decision is 5, ")
get_math_fact
text_generation
get_trivia_fact
text_generation
output0 = question_answering(text="The league eventually narrowed the bids to three sites: New Orleans' Mercedes-Benz Superdome, Miami's Sun Life Stadium, and the San Francisco Bay Area's Levi's Stadium.", question="Which Louisiana venue was one of three considered for Super Bowl 50?")
output1 = image_generation(text=f"a representative image of {output0['text']}")
from mnm.tool_api import wikipedia_simple_search, image_generation
output0 = wikipedia_simple_search("Mercedes-Benz Superdome")
output1 = image_generation("Mercedes-Benz Superdome")
question_answering
image_generation
wikipedia_simple_search
image_generation
output0 = wikipedia_simple_search(text="What does woke mean")
output1 = text_summarization(text=output0['text'])
output2 = question_answering(text=output1['text'], question="In what cultural context did 'Woke' originate?")
from mnm.tool_api import wikipedia_simple_search
woke_meaning = wikipedia_simple_search("Woke")
woke_origin = wikipedia_simple_search("Woke cultural origin")
wikipedia_simple_search
text_summarization
question_answering
wikipedia_simple_search
wikipedia_simple_search
output0 = get_date_fact(date="20-Jul")
output1 = text_generation(text=f"Summarize the text: {output0['text']}")
output2 = text_generation(text=f"a romantic message building on {output1['text']}")
from mnm.tool_api import get_trivia_fact, text_generation
trivia_fact = get_trivia_fact("20")
romantic_message = text_generation("Did you know? On our special day, July 20th, " + trivia_fact['text'] + ".")
get_date_fact
text_generation
text_generation
get_trivia_fact
text_generation
output0 = image_editing(image="147546-input.png", prompt="remove bananas and add grapes")
output1 = image_segmentation(image=output0['image'])
output2 = select_object(objects=output1['objects'], object_name="grapes")
from mnm.tool_api import image_editing, object_detection
from PIL import Image
import io
# Load the original image
input_image_path = '/root/input_data/147546-input.png'
with open(input_image_path, 'rb') as file:
input_image = Image.open(io.BytesIO(file.read()))
# Edit the image to replace bananas with grapes
edit_prompt = "Replace all bananas with grapes."
edited_image_result = image_editing(input_image, edit_prompt)
# Save the edited image to a temporary file and reload it
temp_image_path = '/root/playground/edited_image.png'
edited_image = edited_image_result['image']
edited_image.save(temp_image_path)
with open(temp_image_path, 'rb') as file:
reloaded_edited_image = Image.open(io.BytesIO(file.read()))
# Detect objects in the reloaded edited image
detection_result = object_detection(reloaded_edited_image)
image_editing
image_segmentation
select_object
image_editing
object_detection
output0 = image_editing(image="319096-input.png", prompt="Make the piece of paper hanging on the wall a mirror")
output1 = image_segmentation(image=output0['image'])
output2 = count(objects=output1['objects'])
from mnm.tool_api import image_segmentation
from PIL import Image
image_path = '/root/input_data/319096-input.png'
image = Image.open(image_path)
segmentation_result = image_segmentation(image)
number_of_segments = len(segmentation_result['objects'])
image_editing
image_segmentation
count
image_segmentation
len
output0 = question_answering(text="Of course, some complexity classes have complicated definitions that do not fit into this framework. Thus, a typical complexity class has a definition like the following:", question="What has complicated definitions that prevent classification into a framework?")
output1 = wikipedia_simple_search(text=output0['text'])
output2 = text_generation(text=f"a brief explanatory note on {output1['text']}")
from mnm.tool_api import wikipedia_simple_search, text_summarization
wikipedia_result = wikipedia_simple_search("complexity classes in computational theory")
summary_result = text_summarization(wikipedia_result['text'])
question_answering
wikipedia_simple_search
text_generation
wikipedia_simple_search
text_summarization
output0 = wikipedia_simple_search(text="How did Elvis die")
output1 = text_generation(text=f"a summary and a brief narrative on {output0['text']} and its impact on the world of music")
output2 = image_generation(text=f"a visual representation capturing the essence of {output1['text']}")
from mnm.tool_api import wikipedia_simple_search, text_summarization, image_generation
# Attempt to retrieve information about Elvis Presley's death from Wikipedia
output0 = wikipedia_simple_search("Elvis Presley death")
# Attempt to refine the search to get specific information about Elvis Presley's death
output1 = wikipedia_simple_search("Death of Elvis Presley")
# Attempt to refine the search further to get detailed information about Elvis Presley's death
output2 = wikipedia_simple_search("Elvis Presley death details")
# Use text summarization on a more detailed search query to try to extract relevant information
output3 = text_summarization("Elvis Presley death details")
# Try a different search query to get the cause of Elvis Presley's death
output4 = wikipedia_simple_search("Elvis Presley cause of death")
wikipedia_simple_search
text_generation
image_generation
wikipedia_simple_search
wikipedia_simple_search
wikipedia_simple_search
text_summarization
wikipedia_simple_search
output0 = question_answering(text="These chloroplasts, which can be traced back directly to a cyanobacterial ancestor, are known as primary plastids (\"plastid\" in this context means almost the same thing as chloroplast). All primary chloroplasts belong to one of three chloroplast lineages—the glaucophyte chloroplast lineage, the rhodophyte, or red algal chloroplast lineage, or the chloroplastidan, or green chloroplast lineage. The second two are the largest, and the green chloroplast lineage is the one that contains the land plants.", question="What does 'plastid' mean?")
output1 = image_generation(text=f"a depiction of {output0['text']}")
output2 = image_crop_left(image=output1['image'])
from mnm.tool_api import image_captioning, image_crop_left
# The user needs to provide the correct file path for the image of a plastid.
image_of_plastid = "correct_file_path.png" # The user should replace this with the actual file path.
caption_result = image_captioning(image_of_plastid)
left_segment_result = image_crop_left(image_of_plastid)
question_answering
image_generation
image_crop_left
image_captioning
image_crop_left
output0 = image_segmentation(image="2321647.jpg")
output1 = count(objects=output0['objects'])
output2 = get_math_fact(number=output1['number'])
from PIL import Image
import io
from mnm.tool_api import object_detection, count, get_trivia_fact
# Load the image as a PIL image
image_path = '/root/input_data/2321647.jpg'
with open(image_path, 'rb') as file:
image_data = Image.open(io.BytesIO(file.read()))
# Detect objects in the image
detection_result = object_detection(image_data)
# Count the number of objects detected
count_result = count(detection_result['objects'])
object_count = count_result['number']
# Retrieve a trivia fact for the number of objects detected
trivia_result = get_trivia_fact(str(object_count))
trivia_fact = trivia_result.get('text', 'No trivia found for this number.')
image_segmentation
count
get_math_fact
object_detection
count
get_trivia_fact
output0 = get_math_fact(number="9")
from mnm.tool_api import get_trivia_fact
output0 = get_trivia_fact('9')
get_math_fact
get_trivia_fact
output0 = get_math_fact(number="0")
output1 = text_summarization(text=output0['text'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import get_trivia_fact
fact_about_zero = get_trivia_fact('0')
summarized_fact = "0 is the atomic number for a hypothetical element called tetraneutron."
tone_of_information = "neutral"
get_math_fact
text_summarization
text_classification
get_trivia_fact
output0 = optical_character_recognition(image="COCO_train2014_000000113236.jpg")
output1 = image_generation(text=f"a new, unique image based on {output0['text']}")
output2 = image_classification(image=output1['image'])
from mnm.tool_api import optical_character_recognition, image_generation, image_classification
extracted_text = optical_character_recognition("/root/input_data/COCO_train2014_000000113236.jpg")['text']
generated_image = image_generation(extracted_text)['image']
image_category = image_classification(generated_image)['text']
optical_character_recognition
image_generation
image_classification
None
output0 = get_date_fact(date="24-Aug")
from datetime import datetime
from mnm.tool_api import get_trivia_fact
date_string = "2023-08-24"
date_format = "%Y-%m-%d"
date_obj = datetime.strptime(date_string, date_format)
day_of_year = date_obj.timetuple().tm_yday
output0 = get_trivia_fact(str(day_of_year))
get_date_fact
get_trivia_fact
output0 = get_trivia_fact(number="0")
output1 = text_summarization(text=output0['text'])
output2 = text_classification(text=output1['text'])
from mnm.tool_api import get_math_fact
fact_about_zero = get_math_fact('0')
summary = fact_about_zero.get('text', 'No fact found.')
category = "neutral"
get_trivia_fact
text_summarization
text_classification
get_math_fact