"""Streamlit calculator tab: estimate the environmental impacts of a single LLM request."""

import streamlit as st

from ecologits.tracers.utils import llm_impacts
from src.impacts import display_impacts, display_equivalent
from src.utils import format_impacts
from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
from src.models import load_models

from src.constants import PROMPTS


def calculator_mode():
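    """Render the calculator: model selection, data-quality warnings and impact results."""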
    with st.container(border=True):
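        # Load the model catalog used to populate the selectors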
        df = load_models(filter_main=True)

        col1, col2, col3 = st.columns(3)

        with col1:
            provider = st.selectbox(
                label="Provider",
                options=list(df["provider_clean"].unique()),
                index=7,
            )

        with col2:
            model = st.selectbox(
                label="Model",
                options=list(
                    df[df["provider_clean"] == provider]["name_clean"].unique()
                ),
            )

        with col3:
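            # Each entry of PROMPTS pairs a human-readable label with an output token
            # count; the label is selected here, the count is looked up before llm_impacts.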
            selected_prompt = st.selectbox("Example prompt", [x[0] for x in PROMPTS])

        # Resolve the raw provider/model identifiers and surface data-quality warnings
        df_filtered = df[
            (df["provider_clean"] == provider) & (df["name_clean"] == model)
        ]
        provider_raw = df_filtered["provider"].values[0]
        model_raw = df_filtered["name"].values[0]

        warning_arch = df_filtered["warning_arch"].values[0]
        warning_multi_modal = df_filtered["warning_multi_modal"].values[0]

        if warning_arch and warning_multi_modal:
            st.warning(WARNING_BOTH)
        elif warning_arch:
            st.warning(WARNING_CLOSED_SOURCE)
        elif warning_multi_modal:
            st.warning(WARNING_MULTI_MODAL)

    # Estimate the impacts of a single request for the selected example prompt
    try:
        impacts = llm_impacts(
            provider=provider_raw,
            model_name=model_raw,
            output_token_count=[x[1] for x in PROMPTS if x[0] == selected_prompt][0],
            request_latency=100000,
        )

        impacts, _, _ = format_impacts(impacts)

        with st.container(border=True):
            st.markdown(
                '<h3 align = "center">Environmental impacts</h3>',
                unsafe_allow_html=True,
            )
            st.markdown(
                '<p align = "center">To understand how the environmental impacts are computed, go to the 📖 Methodology tab.</p>',
                unsafe_allow_html=True,
            )
            display_impacts(impacts)

        with st.container(border=True):
            st.markdown(
                '<h3 align = "center">That\'s equivalent to ...</h3>',
                unsafe_allow_html=True,
            )
            st.markdown(
                '<p align = "center">Making this request to the LLM is equivalent to the following actions:</p>',
                unsafe_allow_html=True,
            )
            display_equivalent(impacts)

    except Exception:
        st.error(
            "Could not find the model in the repository. Please try another model."
        )
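

# Minimal standalone preview (a sketch, assuming this module can be run directly with
# `streamlit run`; in the full app, calculator_mode() is presumably imported and called
# from the main entry point):
#
# if __name__ == "__main__":
#     calculator_mode()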