Merge remote-tracking branch 'origin/main' into ryan/flux-regional-prompting

This commit is contained in:
psychedelicious 2024-11-29 15:19:39 +10:00
commit 5910892c33
30 changed files with 521 additions and 49 deletions

SECURITY.md (new file, 14 lines added)
View File

@ -0,0 +1,14 @@
# Security Policy
## Supported Versions
Only the latest version of Invoke will receive security updates.
We do not currently maintain multiple versions of the application with updates.
## Reporting a Vulnerability
To report a vulnerability, contact the Invoke team directly at security@invoke.ai
At this time, we do not maintain a formal bug bounty program.
You can also share identified security issues with our team on huntr.com

View File

@ -38,7 +38,7 @@ This project is a combined effort of dedicated people from across the world. [C
## Code of Conduct
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
The InvokeAI community is a welcoming place, and we want your help in maintaining that. Please review our [Code of Conduct](https://github.com/invoke-ai/InvokeAI/blob/main/docs/CODE_OF_CONDUCT.md) to learn more - it's essential to maintaining a respectful and inclusive environment.
By making a contribution to this project, you certify that:

View File

@ -0,0 +1,59 @@
from pydantic import ValidationInfo, field_validator
from invokeai.app.invocations.baseinvocation import (
BaseInvocation,
BaseInvocationOutput,
Classification,
invocation,
invocation_output,
)
from invokeai.app.invocations.fields import InputField, OutputField
from invokeai.app.services.shared.invocation_context import InvocationContext
@invocation_output("image_panel_coordinate_output")
class ImagePanelCoordinateOutput(BaseInvocationOutput):
x_left: int = OutputField(description="The left x-coordinate of the panel.")
y_top: int = OutputField(description="The top y-coordinate of the panel.")
width: int = OutputField(description="The width of the panel.")
height: int = OutputField(description="The height of the panel.")
@invocation(
"image_panel_layout",
title="Image Panel Layout",
tags=["image", "panel", "layout"],
category="image",
version="1.0.0",
classification=Classification.Prototype,
)
class ImagePanelLayoutInvocation(BaseInvocation):
"""Get the coordinates of a single panel in a grid. (If the full image shape cannot be divided evenly into panels,
then the grid may not cover the entire image.)
"""
width: int = InputField(description="The width of the entire grid.")
height: int = InputField(description="The height of the entire grid.")
num_cols: int = InputField(ge=1, default=1, description="The number of columns in the grid.")
num_rows: int = InputField(ge=1, default=1, description="The number of rows in the grid.")
panel_col_idx: int = InputField(ge=0, default=0, description="The column index of the panel to be processed.")
panel_row_idx: int = InputField(ge=0, default=0, description="The row index of the panel to be processed.")
@field_validator("panel_col_idx")
def validate_panel_col_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_cols"]:
raise ValueError(f"panel_col_idx must be between 0 and {info.data['num_cols'] - 1}")
return v
@field_validator("panel_row_idx")
def validate_panel_row_idx(cls, v: int, info: ValidationInfo) -> int:
if v < 0 or v >= info.data["num_rows"]:
raise ValueError(f"panel_row_idx must be between 0 and {info.data['num_rows'] - 1}")
return v
def invoke(self, context: InvocationContext) -> ImagePanelCoordinateOutput:
x_left = self.panel_col_idx * (self.width // self.num_cols)
y_top = self.panel_row_idx * (self.height // self.num_rows)
width = self.width // self.num_cols
height = self.height // self.num_rows
return ImagePanelCoordinateOutput(x_left=x_left, y_top=y_top, width=width, height=height)
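The panel arithmetic above is plain integer division, so the last row or column can fall short of the image edge, exactly as the docstring warns. A minimal standalone sketch of the same math (a hypothetical helper, not part of this commit) makes the caveat concrete:

# Hypothetical illustration of the panel math (not part of this commit):
# a 1000x600 grid split into 3 columns x 2 rows yields 333x300 panels,
# leaving 1 px of width uncovered - the uneven-grid caveat from the docstring.
def panel_coords(width: int, height: int, num_cols: int, num_rows: int, col_idx: int, row_idx: int):
    panel_w = width // num_cols
    panel_h = height // num_rows
    return (col_idx * panel_w, row_idx * panel_h, panel_w, panel_h)

assert panel_coords(1000, 600, 3, 2, 2, 1) == (666, 300, 333, 300)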

View File

@ -86,7 +86,7 @@ class ModelLoadService(ModelLoadServiceBase):
def torch_load_file(checkpoint: Path) -> AnyModel:
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception(f"The model at {checkpoint} is potentially infected by malware. Aborting load.")
result = torch_load(checkpoint, map_location="cpu")
return result

View File

@ -469,7 +469,7 @@ class ModelProbe(object):
"""
# scan model
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception(f"The model {model_name} is potentially infected by malware. Aborting import.")

View File

@ -44,7 +44,7 @@ def _fast_safetensors_reader(path: str) -> Dict[str, torch.Tensor]:
return checkpoint
def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str, torch.Tensor]:
def read_checkpoint_meta(path: Union[str, Path], scan: bool = True) -> Dict[str, torch.Tensor]:
if str(path).endswith(".safetensors"):
try:
path_str = path.as_posix() if isinstance(path, Path) else path
@ -55,7 +55,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False) -> Dict[str
else:
if scan:
scan_result = scan_file_path(path)
if scan_result.infected_files != 0:
if scan_result.infected_files != 0 or scan_result.scan_err:
raise Exception(f'The model file "{path}" is potentially infected by malware. Aborting import.')
if str(path).endswith(".gguf"):
# The GGUF reader used here uses numpy memmap, so these tensors are not loaded into memory during this function
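Taken together with the two hunks above, this commit tightens the pre-load malware check in three places: a scan error is now treated the same as an infection, and read_checkpoint_meta scans by default. A minimal sketch of that shared guard, assuming picklescan's scan_file_path and its ScanResult fields (which the surrounding code appears to rely on), not code from the repo:

# Sketch only - assumes picklescan's ScanResult exposes infected_files and scan_err.
from pathlib import Path

from picklescan.scanner import scan_file_path

def assert_checkpoint_safe(path: Path) -> None:
    result = scan_file_path(path)
    # A scan that errored out is treated like a positive hit: if the scanner
    # could not finish, refuse to load rather than assume the file is clean.
    if result.infected_files != 0 or result.scan_err:
        raise RuntimeError(f'The model file "{path}" may be infected by malware. Aborting load.')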

View File

@ -1320,8 +1320,9 @@
"controlNetBeginEnd": {
"heading": "Begin / End Step Percentage",
"paragraphs": [
"The part of the denoising process that will have the Control Adapter applied.",
"Generally, Control Adapters applied at the start of the process guide composition, and Control Adapters applied at the end guide details."
"This setting determines which portion of the denoising (generation) process incorporates the guidance from this layer.",
"• Start Step (%): Specifies when to begin applying the guidance from this layer during the generation process.",
"• End Step (%): Specifies when to stop applying this layer's guidance and revert to general guidance from the model and other settings."
]
},
"controlNetControlMode": {
@ -1339,13 +1340,15 @@
"paragraphs": ["Method to fit Control Adapter's input image size to the output generation size."]
},
"ipAdapterMethod": {
"heading": "Method",
"paragraphs": ["Method by which to apply the current IP Adapter."]
"heading": "Mode",
"paragraphs": ["The mode defines how the reference image will guide the generation process."]
},
"controlNetWeight": {
"heading": "Weight",
"paragraphs": [
"Weight of the Control Adapter. Higher weight will lead to larger impacts on the final image."
"Adjusts how strongly the layer influences the generation process.",
"• Higher Weight (.75-2): Creates a more significant impact on the final result.",
"• Lower Weight (0-.75): Creates a smaller impact on the final result."
]
},
"dynamicPrompts": {
@ -1782,6 +1785,7 @@
"pullBboxIntoLayer": "Pull Bbox into Layer",
"pullBboxIntoReferenceImage": "Pull Bbox into Reference Image",
"showProgressOnCanvas": "Show Progress on Canvas",
"useImage": "Use Image",
"prompt": "Prompt",
"negativePrompt": "Negative Prompt",
"beginEndStepPercentShort": "Begin/End %",
@ -1794,6 +1798,7 @@
"resetGenerationSettings": "Reset Generation Settings",
"replaceCurrent": "Replace Current",
"controlLayerEmptyState": "<UploadButton>Upload an image</UploadButton>, drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer, or draw on the canvas to get started.",
"referenceImageEmptyState": "<UploadButton>Upload an image</UploadButton> or drag an image from the <GalleryButton>gallery</GalleryButton> onto this layer to get started.",
"controlMode": {
"controlMode": "Control Mode",
"balanced": "Balanced (recommended)",
@ -1802,10 +1807,13 @@
"megaControl": "Mega Control"
},
"ipAdapterMethod": {
"ipAdapterMethod": "IP Adapter Method",
"ipAdapterMethod": "Mode",
"full": "Style and Composition",
"fullDesc": "Applies visual style (colors, textures) & composition (layout, structure).",
"style": "Style Only",
"composition": "Composition Only"
"styleDesc": "Applies visual style (colors, textures) without considering its layout.",
"composition": "Composition Only",
"compositionDesc": "Replicates layout & structure while ignoring the reference's style."
},
"fill": {
"fillColor": "Fill Color",

View File

@ -1,3 +1,4 @@
import { useStore } from '@nanostores/react';
import { useAppStore } from 'app/store/storeHooks';
import { useAssertSingleton } from 'common/hooks/useAssertSingleton';
import { withResultAsync } from 'common/util/result';
@ -9,6 +10,7 @@ import { imageDTOToImageObject } from 'features/controlLayers/store/util';
import { $imageViewer } from 'features/gallery/components/ImageViewer/useImageViewer';
import { sentImageToCanvas } from 'features/gallery/store/actions';
import { parseAndRecallAllMetadata } from 'features/metadata/util/handlers';
import { $hasTemplates } from 'features/nodes/store/nodesSlice';
import { $isWorkflowListMenuIsOpen } from 'features/nodes/store/workflowListMenu';
import { $isStylePresetsMenuOpen, activeStylePresetIdChanged } from 'features/stylePresets/store/stylePresetSlice';
import { toast } from 'features/toast/toast';
@ -51,6 +53,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
const { t } = useTranslation();
// Use a ref to ensure that we only perform the action once
const didInit = useRef(false);
const didParseOpenAPISchema = useStore($hasTemplates);
const store = useAppStore();
const { getAndLoadWorkflow } = useGetAndLoadLibraryWorkflow();
@ -174,7 +177,7 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
);
useEffect(() => {
if (didInit.current || !action) {
if (didInit.current || !action || !didParseOpenAPISchema) {
return;
}
@ -187,22 +190,29 @@ export const useStudioInitAction = (action?: StudioInitAction) => {
case 'selectStylePreset':
handleSelectStylePreset(action.data.stylePresetId);
break;
case 'sendToCanvas':
handleSendToCanvas(action.data.imageName);
break;
case 'useAllParameters':
handleUseAllMetadata(action.data.imageName);
break;
case 'goToDestination':
handleGoToDestination(action.data.destination);
break;
default:
break;
}
}, [
handleSendToCanvas,
handleUseAllMetadata,
action,
handleLoadWorkflow,
handleSelectStylePreset,
handleGoToDestination,
handleLoadWorkflow,
didParseOpenAPISchema,
]);
};

View File

@ -46,7 +46,7 @@ const REGION_TARGETS: Record<FocusRegionName, Set<HTMLElement>> = {
/**
* The currently-focused region or `null` if no region is focused.
*/
const $focusedRegion = atom<FocusRegionName | null>(null);
export const $focusedRegion = atom<FocusRegionName | null>(null);
/**
* A map of focus regions to atoms that indicate if that region is focused.

View File

@ -1,8 +1,10 @@
import type { ComboboxOnChange } from '@invoke-ai/ui-library';
import { Combobox, FormControl, FormLabel } from '@invoke-ai/ui-library';
import { useAppSelector } from 'app/store/storeHooks';
import { InformationalPopover } from 'common/components/InformationalPopover/InformationalPopover';
import type { IPMethodV2 } from 'features/controlLayers/store/types';
import { isIPMethodV2 } from 'features/controlLayers/store/types';
import { selectSystemShouldEnableModelDescriptions } from 'features/system/store/systemSlice';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { assert } from 'tsafe';
@ -14,13 +16,27 @@ type Props = {
export const IPAdapterMethod = memo(({ method, onChange }: Props) => {
const { t } = useTranslation();
const shouldShowModelDescriptions = useAppSelector(selectSystemShouldEnableModelDescriptions);
const options: { label: string; value: IPMethodV2 }[] = useMemo(
() => [
{ label: t('controlLayers.ipAdapterMethod.full'), value: 'full' },
{ label: t('controlLayers.ipAdapterMethod.style'), value: 'style' },
{ label: t('controlLayers.ipAdapterMethod.composition'), value: 'composition' },
{
label: t('controlLayers.ipAdapterMethod.full'),
value: 'full',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.fullDesc') : undefined,
},
{
label: t('controlLayers.ipAdapterMethod.style'),
value: 'style',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.styleDesc') : undefined,
},
{
label: t('controlLayers.ipAdapterMethod.composition'),
value: 'composition',
description: shouldShowModelDescriptions ? t('controlLayers.ipAdapterMethod.compositionDesc') : undefined,
},
],
[t]
[t, shouldShowModelDescriptions]
);
const _onChange = useCallback<ComboboxOnChange>(
(v) => {

View File

@ -5,6 +5,7 @@ import { BeginEndStepPct } from 'features/controlLayers/components/common/BeginE
import { CanvasEntitySettingsWrapper } from 'features/controlLayers/components/common/CanvasEntitySettingsWrapper';
import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterSettingsEmptyState } from 'features/controlLayers/components/IPAdapter/IPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoGlobalReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@ -17,7 +18,7 @@ import {
referenceImageIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectIsFLUX } from 'features/controlLayers/store/paramsSlice';
import { selectCanvasSlice, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import { selectCanvasSlice, selectEntity, selectEntityOrThrow } from 'features/controlLayers/store/selectors';
import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
@ -35,7 +36,7 @@ const buildSelectIPAdapter = (entityIdentifier: CanvasEntityIdentifier<'referenc
(canvas) => selectEntityOrThrow(canvas, entityIdentifier, 'IPAdapterSettings').ipAdapter
);
export const IPAdapterSettings = memo(() => {
const IPAdapterSettingsContent = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const entityIdentifier = useEntityIdentifierContext('reference_image');
@ -134,4 +135,25 @@ export const IPAdapterSettings = memo(() => {
);
});
IPAdapterSettingsContent.displayName = 'IPAdapterSettingsContent';
const buildSelectIPAdapterHasImage = (entityIdentifier: CanvasEntityIdentifier<'reference_image'>) =>
createSelector(selectCanvasSlice, (canvas) => {
const referenceImage = selectEntity(canvas, entityIdentifier);
return !!referenceImage && referenceImage.ipAdapter.image !== null;
});
export const IPAdapterSettings = memo(() => {
const entityIdentifier = useEntityIdentifierContext('reference_image');
const selectIPAdapterHasImage = useMemo(() => buildSelectIPAdapterHasImage(entityIdentifier), [entityIdentifier]);
const hasImage = useAppSelector(selectIPAdapterHasImage);
if (!hasImage) {
return <IPAdapterSettingsEmptyState />;
}
return <IPAdapterSettingsContent />;
});
IPAdapterSettings.displayName = 'IPAdapterSettings';

View File

@ -0,0 +1,64 @@
import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import type { SetGlobalReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setGlobalReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { setGlobalReferenceImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';
export const IPAdapterSettingsEmptyState = memo(() => {
const { t } = useTranslation();
const entityIdentifier = useEntityIdentifierContext('reference_image');
const dispatch = useAppDispatch();
const isBusy = useCanvasIsBusy();
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
setGlobalReferenceImage({ imageDTO, entityIdentifier, dispatch });
},
[dispatch, entityIdentifier]
);
const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
const onClickGalleryButton = useCallback(() => {
dispatch(activeTabCanvasRightPanelChanged('gallery'));
}, [dispatch]);
const dndTargetData = useMemo<SetGlobalReferenceImageDndTargetData>(
() => setGlobalReferenceImageDndTarget.getData({ entityIdentifier }),
[entityIdentifier]
);
const components = useMemo(
() => ({
UploadButton: (
<Button isDisabled={isBusy} size="sm" variant="link" color="base.300" {...uploadApi.getUploadButtonProps()} />
),
GalleryButton: (
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}),
[isBusy, onClickGalleryButton, uploadApi]
);
return (
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
<Text textAlign="center" color="base.300">
<Trans i18nKey="controlLayers.referenceImageEmptyState" components={components} />
</Text>
<input {...uploadApi.getUploadInputProps()} />
<DndDropTarget
dndTarget={setGlobalReferenceImageDndTarget}
dndTargetData={dndTargetData}
label={t('controlLayers.useImage')}
isDisabled={isBusy}
/>
</Flex>
);
});
IPAdapterSettingsEmptyState.displayName = 'IPAdapterSettingsEmptyState';

View File

@ -6,6 +6,7 @@ import { Weight } from 'features/controlLayers/components/common/Weight';
import { IPAdapterImagePreview } from 'features/controlLayers/components/IPAdapter/IPAdapterImagePreview';
import { IPAdapterMethod } from 'features/controlLayers/components/IPAdapter/IPAdapterMethod';
import { IPAdapterModel } from 'features/controlLayers/components/IPAdapter/IPAdapterModel';
import { RegionalGuidanceIPAdapterSettingsEmptyState } from 'features/controlLayers/components/RegionalGuidance/RegionalGuidanceIPAdapterSettingsEmptyState';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { usePullBboxIntoRegionalGuidanceReferenceImage } from 'features/controlLayers/hooks/saveCanvasHooks';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
@ -19,12 +20,12 @@ import {
rgIPAdapterWeightChanged,
} from 'features/controlLayers/store/canvasSlice';
import { selectCanvasSlice, selectRegionalGuidanceReferenceImage } from 'features/controlLayers/store/selectors';
import type { CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { CanvasEntityIdentifier, CLIPVisionModelV2, IPMethodV2 } from 'features/controlLayers/store/types';
import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { memo, useCallback, useMemo } from 'react';
import { useTranslation } from 'react-i18next';
import { PiBoundingBoxBold, PiTrashSimpleFill } from 'react-icons/pi';
import { PiBoundingBoxBold, PiXBold } from 'react-icons/pi';
import type { ImageDTO, IPAdapterModelConfig } from 'services/api/types';
import { assert } from 'tsafe';
@ -32,7 +33,7 @@ type Props = {
referenceImageId: string;
};
export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
const RegionalGuidanceIPAdapterSettingsContent = memo(({ referenceImageId }: Props) => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
const { t } = useTranslation();
const dispatch = useAppDispatch();
@ -115,7 +116,7 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
size="sm"
variant="link"
alignSelf="stretch"
icon={<PiTrashSimpleFill />}
icon={<PiXBold />}
tooltip={t('controlLayers.deleteReferenceImage')}
aria-label={t('controlLayers.deleteReferenceImage')}
onClick={onDeleteIPAdapter}
@ -161,4 +162,31 @@ export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Pro
);
});
RegionalGuidanceIPAdapterSettingsContent.displayName = 'RegionalGuidanceIPAdapterSettingsContent';
const buildSelectIPAdapterHasImage = (
entityIdentifier: CanvasEntityIdentifier<'regional_guidance'>,
referenceImageId: string
) =>
createSelector(selectCanvasSlice, (canvas) => {
const referenceImage = selectRegionalGuidanceReferenceImage(canvas, entityIdentifier, referenceImageId);
return !!referenceImage && referenceImage.ipAdapter.image !== null;
});
export const RegionalGuidanceIPAdapterSettings = memo(({ referenceImageId }: Props) => {
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
const selectIPAdapterHasImage = useMemo(
() => buildSelectIPAdapterHasImage(entityIdentifier, referenceImageId),
[entityIdentifier, referenceImageId]
);
const hasImage = useAppSelector(selectIPAdapterHasImage);
if (!hasImage) {
return <RegionalGuidanceIPAdapterSettingsEmptyState referenceImageId={referenceImageId} />;
}
return <RegionalGuidanceIPAdapterSettingsContent referenceImageId={referenceImageId} />;
});
RegionalGuidanceIPAdapterSettings.displayName = 'RegionalGuidanceIPAdapterSettings';

View File

@ -0,0 +1,76 @@
import { Button, Flex, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useImageUploadButton } from 'common/hooks/useImageUploadButton';
import { useEntityIdentifierContext } from 'features/controlLayers/contexts/EntityIdentifierContext';
import { useCanvasIsBusy } from 'features/controlLayers/hooks/useCanvasIsBusy';
import type { SetRegionalGuidanceReferenceImageDndTargetData } from 'features/dnd/dnd';
import { setRegionalGuidanceReferenceImageDndTarget } from 'features/dnd/dnd';
import { DndDropTarget } from 'features/dnd/DndDropTarget';
import { setRegionalGuidanceReferenceImage } from 'features/imageActions/actions';
import { activeTabCanvasRightPanelChanged } from 'features/ui/store/uiSlice';
import { memo, useCallback, useMemo } from 'react';
import { Trans, useTranslation } from 'react-i18next';
import type { ImageDTO } from 'services/api/types';
type Props = {
referenceImageId: string;
};
export const RegionalGuidanceIPAdapterSettingsEmptyState = memo(({ referenceImageId }: Props) => {
const { t } = useTranslation();
const entityIdentifier = useEntityIdentifierContext('regional_guidance');
const dispatch = useAppDispatch();
const isBusy = useCanvasIsBusy();
const onUpload = useCallback(
(imageDTO: ImageDTO) => {
setRegionalGuidanceReferenceImage({ imageDTO, entityIdentifier, referenceImageId, dispatch });
},
[dispatch, entityIdentifier, referenceImageId]
);
const uploadApi = useImageUploadButton({ onUpload, allowMultiple: false });
const onClickGalleryButton = useCallback(() => {
dispatch(activeTabCanvasRightPanelChanged('gallery'));
}, [dispatch]);
const dndTargetData = useMemo<SetRegionalGuidanceReferenceImageDndTargetData>(
() =>
setRegionalGuidanceReferenceImageDndTarget.getData({
entityIdentifier,
referenceImageId,
}),
[entityIdentifier, referenceImageId]
);
return (
<Flex flexDir="column" gap={3} position="relative" w="full" p={4}>
<Text textAlign="center" color="base.300">
<Trans
i18nKey="controlLayers.referenceImageEmptyState"
components={{
UploadButton: (
<Button
isDisabled={isBusy}
size="sm"
variant="link"
color="base.300"
{...uploadApi.getUploadButtonProps()}
/>
),
GalleryButton: (
<Button onClick={onClickGalleryButton} isDisabled={isBusy} size="sm" variant="link" color="base.300" />
),
}}
/>
</Text>
<input {...uploadApi.getUploadInputProps()} />
<DndDropTarget
dndTarget={setRegionalGuidanceReferenceImageDndTarget}
dndTargetData={dndTargetData}
label={t('controlLayers.useImage')}
isDisabled={isBusy}
/>
</Flex>
);
});
RegionalGuidanceIPAdapterSettingsEmptyState.displayName = 'RegionalGuidanceIPAdapterSettingsEmptyState';

View File

@ -1,4 +1,6 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useStore } from '@nanostores/react';
import { $authToken } from 'app/store/nanostores/authToken';
import { useAppSelector } from 'app/store/storeHooks';
import { withResultAsync } from 'common/util/result';
import { selectSelectedImage } from 'features/controlLayers/store/canvasStagingAreaSlice';
@ -14,6 +16,7 @@ const TOAST_ID = 'SAVE_STAGING_AREA_IMAGE_TO_GALLERY';
export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
const autoAddBoardId = useAppSelector(selectAutoAddBoardId);
const selectedImage = useAppSelector(selectSelectedImage);
const authToken = useStore($authToken);
const { t } = useTranslation();
@ -26,7 +29,14 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
// the gallery without borking the canvas, which may need this image to exist.
const result = await withResultAsync(async () => {
// Download the image
const res = await fetch(selectedImage.imageDTO.image_url);
const requestOpts = authToken
? {
headers: {
Authorization: `Bearer ${authToken}`,
},
}
: {};
const res = await fetch(selectedImage.imageDTO.image_url, requestOpts);
const blob = await res.blob();
// Create a new file with the same name, which we will upload
const file = new File([blob], `copy_of_${selectedImage.imageDTO.image_name}`, { type: 'image/png' });
@ -56,7 +66,7 @@ export const StagingAreaToolbarSaveSelectedToGalleryButton = memo(() => {
status: 'error',
});
}
}, [autoAddBoardId, selectedImage, t]);
}, [autoAddBoardId, selectedImage, t, authToken]);
return (
<IconButton

View File

@ -9,6 +9,7 @@ import {
getEmptyRect,
getKonvaNodeDebugAttrs,
getPrefixedId,
offsetCoord,
} from 'features/controlLayers/konva/util';
import { selectSelectedEntityIdentifier } from 'features/controlLayers/store/selectors';
import type { Coordinate, Rect, RectWithRotation } from 'features/controlLayers/store/types';
@ -558,6 +559,25 @@ export class CanvasEntityTransformer extends CanvasModuleBase {
this.manager.stateApi.setEntityPosition({ entityIdentifier: this.parent.entityIdentifier, position });
};
nudgeBy = (offset: Coordinate) => {
// We can immediately move both the proxy rect and layer objects so we don't have to wait for a redux round-trip,
// which can take up to 2ms in my testing. This is optional, but can make the interaction feel more responsive,
// especially on lower-end devices.
// Get the relative position of the layer's objects, according to konva
const position = this.konva.proxyRect.position();
// Offset the position by the nudge amount
const newPosition = offsetCoord(position, offset);
// Set the new position of the proxy rect - this doesn't move the layer objects - only the outline rect
this.konva.proxyRect.setAttrs(newPosition);
// Sync the layer objects with the proxy rect - moves them to the new position
this.syncObjectGroupWithProxyRect();
// Push to redux. The state change will do a round-trip, and eventually make it back to the canvas classes, at
// which point the layer will be moved to the new position.
this.manager.stateApi.moveEntityBy({ entityIdentifier: this.parent.entityIdentifier, offset });
this.log.trace({ offset }, 'Nudged');
};
syncObjectGroupWithProxyRect = () => {
this.parent.renderer.konva.objectGroup.setAttrs({
x: this.konva.proxyRect.x(),

View File

@ -20,7 +20,8 @@ import {
controlLayerAdded,
entityBrushLineAdded,
entityEraserLineAdded,
entityMoved,
entityMovedBy,
entityMovedTo,
entityRasterized,
entityRectAdded,
entityReset,
@ -40,7 +41,8 @@ import type {
EntityBrushLineAddedPayload,
EntityEraserLineAddedPayload,
EntityIdentifierPayload,
EntityMovedPayload,
EntityMovedByPayload,
EntityMovedToPayload,
EntityRasterizedPayload,
EntityRectAddedPayload,
Rect,
@ -139,8 +141,15 @@ export class CanvasStateApiModule extends CanvasModuleBase {
/**
* Updates an entity's position, pushing state to redux.
*/
setEntityPosition = (arg: EntityMovedPayload) => {
this.store.dispatch(entityMoved(arg));
setEntityPosition = (arg: EntityMovedToPayload) => {
this.store.dispatch(entityMovedTo(arg));
};
/**
* Moves an entity by the given offset, pushing state to redux.
*/
moveEntityBy = (arg: EntityMovedByPayload) => {
this.store.dispatch(entityMovedBy(arg));
};
/**

View File

@ -1,9 +1,24 @@
import { $focusedRegion } from 'common/hooks/focus';
import type { CanvasManager } from 'features/controlLayers/konva/CanvasManager';
import { CanvasModuleBase } from 'features/controlLayers/konva/CanvasModuleBase';
import type { CanvasToolModule } from 'features/controlLayers/konva/CanvasTool/CanvasToolModule';
import { getPrefixedId } from 'features/controlLayers/konva/util';
import type { Coordinate } from 'features/controlLayers/store/types';
import type { Logger } from 'roarr';
type CanvasMoveToolModuleConfig = {
/**
* The number of pixels to nudge the entity by when moving with the arrow keys.
*/
NUDGE_PX: number;
};
const DEFAULT_CONFIG: CanvasMoveToolModuleConfig = {
NUDGE_PX: 1,
};
type NudgeKey = 'ArrowLeft' | 'ArrowRight' | 'ArrowUp' | 'ArrowDown';
export class CanvasMoveToolModule extends CanvasModuleBase {
readonly type = 'move_tool';
readonly id: string;
@ -12,6 +27,9 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
readonly manager: CanvasManager;
readonly log: Logger;
config: CanvasMoveToolModuleConfig = DEFAULT_CONFIG;
nudgeOffsets: Record<NudgeKey, Coordinate>;
constructor(parent: CanvasToolModule) {
super();
this.id = getPrefixedId(this.type);
@ -19,8 +37,18 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
this.manager = this.parent.manager;
this.path = this.manager.buildPath(this);
this.log = this.manager.buildLogger(this);
this.log.debug('Creating module');
this.nudgeOffsets = {
ArrowLeft: { x: -this.config.NUDGE_PX, y: 0 },
ArrowRight: { x: this.config.NUDGE_PX, y: 0 },
ArrowUp: { x: 0, y: -this.config.NUDGE_PX },
ArrowDown: { x: 0, y: this.config.NUDGE_PX },
};
}
isNudgeKey(key: string): key is NudgeKey {
return this.nudgeOffsets[key as NudgeKey] !== undefined;
}
syncCursorStyle = () => {
@ -32,4 +60,45 @@ export class CanvasMoveToolModule extends CanvasModuleBase {
selectedEntity.transformer.syncCursorStyle();
}
};
nudge = (nudgeKey: NudgeKey) => {
if ($focusedRegion.get() !== 'canvas') {
return;
}
const selectedEntity = this.manager.stateApi.getSelectedEntityAdapter();
if (!selectedEntity) {
return;
}
if (
selectedEntity.$isDisabled.get() ||
selectedEntity.$isEmpty.get() ||
selectedEntity.$isLocked.get() ||
selectedEntity.$isEntityTypeHidden.get()
) {
return;
}
const isBusy = this.manager.$isBusy.get();
const isMoveToolSelected = this.parent.$tool.get() === 'move';
const isThisEntityTransforming = this.manager.stateApi.$transformingAdapter.get() === selectedEntity;
if (isBusy) {
// When the canvas is busy, we shouldn't allow nudging - except when the canvas is busy transforming the selected
// entity. Nudging is allowed during transformation, regardless of the selected tool.
if (!isThisEntityTransforming) {
return;
}
} else {
// Otherwise, the canvas is not busy, and we should only allow nudging when the move tool is selected.
if (!isMoveToolSelected) {
return;
}
}
const offset = this.nudgeOffsets[nudgeKey];
selectedEntity.transformer.nudgeBy(offset);
};
}

View File

@ -528,11 +528,16 @@ export class CanvasToolModule extends CanvasModuleBase {
};
onKeyDown = (e: KeyboardEvent) => {
if (e.repeat) {
if (e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement) {
return;
}
if (e.target instanceof HTMLInputElement || e.target instanceof HTMLTextAreaElement) {
// Handle nudging - must be before repeat, as we may want to catch repeating keys
if (this.tools.move.isNudgeKey(e.key)) {
this.tools.move.nudge(e.key);
}
if (e.repeat) {
return;
}

View File

@ -18,6 +18,7 @@ import type {
CanvasEntityType,
CanvasInpaintMaskState,
CanvasMetadata,
EntityMovedByPayload,
FillStyle,
RegionalGuidanceReferenceImageState,
RgbColor,
@ -51,7 +52,7 @@ import type {
EntityBrushLineAddedPayload,
EntityEraserLineAddedPayload,
EntityIdentifierPayload,
EntityMovedPayload,
EntityMovedToPayload,
EntityRasterizedPayload,
EntityRectAddedPayload,
IPMethodV2,
@ -1201,7 +1202,7 @@ export const canvasSlice = createSlice({
}
entity.fill.style = style;
},
entityMoved: (state, action: PayloadAction<EntityMovedPayload>) => {
entityMovedTo: (state, action: PayloadAction<EntityMovedToPayload>) => {
const { entityIdentifier, position } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (!entity) {
@ -1212,6 +1213,20 @@ export const canvasSlice = createSlice({
entity.position = position;
}
},
entityMovedBy: (state, action: PayloadAction<EntityMovedByPayload>) => {
const { entityIdentifier, offset } = action.payload;
const entity = selectEntity(state, entityIdentifier);
if (!entity) {
return;
}
if (!isRenderableEntity(entity)) {
return;
}
entity.position.x += offset.x;
entity.position.y += offset.y;
},
entityRasterized: (state, action: PayloadAction<EntityRasterizedPayload>) => {
const { entityIdentifier, imageObject, position, replaceObjects, isSelected } = action.payload;
const entity = selectEntity(state, entityIdentifier);
@ -1505,7 +1520,8 @@ export const {
entityIsLockedToggled,
entityFillColorChanged,
entityFillStyleChanged,
entityMoved,
entityMovedTo,
entityMovedBy,
entityDuplicated,
entityRasterized,
entityBrushLineAdded,

View File

@ -83,7 +83,7 @@ const initialState: ParamsState = {
canvasCoherenceMode: 'Gaussian Blur',
canvasCoherenceMinDenoise: 0,
canvasCoherenceEdgeSize: 16,
infillMethod: 'patchmatch',
infillMethod: 'lama',
infillTileSize: 32,
infillPatchmatchDownscaleSize: 1,
infillColorValue: { r: 0, g: 0, b: 0, a: 1 },

View File

@ -439,7 +439,8 @@ export type EntityIdentifierPayload<
entityIdentifier: CanvasEntityIdentifier<U>;
} & T;
export type EntityMovedPayload = EntityIdentifierPayload<{ position: Coordinate }>;
export type EntityMovedToPayload = EntityIdentifierPayload<{ position: Coordinate }>;
export type EntityMovedByPayload = EntityIdentifierPayload<{ offset: Coordinate }>;
export type EntityBrushLineAddedPayload = EntityIdentifierPayload<{
brushLine: CanvasBrushLineState | CanvasBrushLineWithPressureState;
}>;

View File

@ -58,6 +58,9 @@ const isAllowedOutputField = (nodeType: string, fieldName: string) => {
if (RESERVED_OUTPUT_FIELD_NAMES.includes(fieldName)) {
return false;
}
if (nodeType === 'image_batch' && fieldName !== 'image') {
return false;
}
return true;
};

View File

@ -1,6 +1,8 @@
import { ExternalLink, Flex, Spacer, Text } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import type { VideoData } from 'features/system/components/VideosModal/data';
import { memo } from 'react';
import { videoModalLinkClicked } from 'features/system/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
const formatTime = ({ minutes, seconds }: { minutes: number; seconds: number }) => {
@ -9,7 +11,12 @@ const formatTime = ({ minutes, seconds }: { minutes: number; seconds: number })
export const VideoCard = memo(({ video }: { video: VideoData }) => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const { tKey, link, length } = video;
const handleLinkClick = useCallback(() => {
dispatch(videoModalLinkClicked(t(`supportVideos.videos.${tKey}.title`)));
}, [dispatch, t, tKey]);
return (
<Flex flexDir="column" gap={1}>
<Flex alignItems="center" gap={2}>
@ -18,7 +25,7 @@ export const VideoCard = memo(({ video }: { video: VideoData }) => {
</Text>
<Spacer />
<Text variant="subtext">{formatTime(length)}</Text>
<ExternalLink fontSize="sm" href={link} label={t('supportVideos.watch')} />
<ExternalLink fontSize="sm" href={link} label={t('supportVideos.watch')} onClick={handleLinkClick} />
</Flex>
<Text fontSize="md" variant="subtext">
{t(`supportVideos.videos.${tKey}.description`)}

View File

@ -10,6 +10,7 @@ import {
ModalOverlay,
Text,
} from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import ScrollableContent from 'common/components/OverlayScrollbars/ScrollableContent';
import { buildUseDisclosure } from 'common/hooks/useBoolean';
import {
@ -18,25 +19,45 @@ import {
studioSessionsPlaylistLink,
} from 'features/system/components/VideosModal/data';
import { VideoCardList } from 'features/system/components/VideosModal/VideoCardList';
import { videoModalLinkClicked } from 'features/system/store/actions';
import { discordLink } from 'features/system/store/constants';
import { memo } from 'react';
import { memo, useCallback } from 'react';
import { Trans, useTranslation } from 'react-i18next';
export const [useVideosModal] = buildUseDisclosure(false);
const StudioSessionsPlaylistLink = () => {
const dispatch = useAppDispatch();
const handleLinkClick = useCallback(() => {
dispatch(videoModalLinkClicked('Studio session playlist'));
}, [dispatch]);
return (
<ExternalLink
fontWeight="semibold"
href={studioSessionsPlaylistLink}
display="inline-flex"
label="Studio Sessions playlist"
onClick={handleLinkClick}
/>
);
};
const DiscordLink = () => {
return <ExternalLink fontWeight="semibold" href={discordLink} display="inline-flex" label="Discord" />;
const dispatch = useAppDispatch();
const handleLinkClick = useCallback(() => {
dispatch(videoModalLinkClicked('Discord'));
}, [dispatch]);
return (
<ExternalLink
fontWeight="semibold"
href={discordLink}
display="inline-flex"
label="Discord"
onClick={handleLinkClick}
/>
);
};
const components = {

View File

@ -1,19 +1,29 @@
import { IconButton } from '@invoke-ai/ui-library';
import { useAppDispatch } from 'app/store/storeHooks';
import { useVideosModal } from 'features/system/components/VideosModal/VideosModal';
import { memo } from 'react';
import { videoModalOpened } from 'features/system/store/actions';
import { memo, useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { PiYoutubeLogoFill } from 'react-icons/pi';
export const VideosModalButton = memo(() => {
const { t } = useTranslation();
const dispatch = useAppDispatch();
const videosModal = useVideosModal();
const onClickOpen = useCallback(() => {
dispatch(videoModalOpened());
videosModal.open();
}, [videosModal, dispatch]);
return (
<IconButton
aria-label={t('supportVideos.supportVideos')}
variant="link"
icon={<PiYoutubeLogoFill fontSize={20} />}
boxSize={8}
onClick={videosModal.open}
onClick={onClickOpen}
/>
);
});

View File

@ -37,8 +37,8 @@ export const gettingStartedVideos: VideoData[] = [
},
{
tKey: 'creatingAndComposingOnInvokesControlCanvas',
link: 'https://www.youtube.com/watch?v=MohWv5GZVGM&list=PLvWK1Kc8iXGrQy8r9TYg6QdUuJ5MMx-ZO&index=5&t=28s&pp=iAQB',
length: { minutes: 13, seconds: 56 },
link: 'https://www.youtube.com/watch?v=O4LaFcYFxlA',
length: { minutes: 2, seconds: 52 },
},
{
tKey: 'upscaling',

View File

@ -0,0 +1,4 @@
import { createAction } from '@reduxjs/toolkit';
export const videoModalLinkClicked = createAction<string>('system/videoModalLinkClicked');
export const videoModalOpened = createAction('system/videoModalOpened');

View File

@ -1 +1 @@
__version__ = "5.4.2"
__version__ = "5.4.3rc1"

View File

@ -56,7 +56,7 @@ dependencies = [
"torchmetrics",
"torchsde",
"torchvision",
"transformers==4.41.1",
"transformers==4.46.3",
# Core application dependencies, pinned for reproducible builds.
"fastapi-events==0.11.1",