custom recording area #6

Open · wants to merge 1 commit into master

@@ -1079,7 +1079,6 @@ public SessionStatusCode BeginRecordingSession(string videoName,
foreach (iVidCapProVideo videoCam in videoCameras) {
//videoCam.InitSession();
videoCam.SetRenderTexture(rt);
videoCam.SetCaptureViewport();
videoCam.SetIsRecording(true);
}
return sessionStatus;
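
For context, a minimal caller-side sketch (not part of this PR) of how the capture area might now be set before a session starts, since BeginRecordingSession no longer calls SetCaptureViewport on each camera. The class name, the capture field, and the scene setup are assumptions; only iVidCapProVideo.SetCaptureRect comes from this change.

using UnityEngine;

// Hypothetical caller: assign the iVidCapProVideo component in the Inspector.
public class CaptureAreaExample : MonoBehaviour {

    public iVidCapProVideo capture;  // assumed scene reference, not part of this PR

    void Start () {
        // Record only the bottom-left quarter of the screen, given in pixel
        // screen coordinates; SetCaptureRect normalises these internally.
        capture.SetCaptureRect (new Rect (0f, 0f, Screen.width / 2f, Screen.height / 2f));

        // Then begin the recording session on the iVidCapPro controller as usual;
        // the BeginRecordingSession signature is unchanged by this PR.
    }
}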
87 changes: 51 additions & 36 deletions iVidCapProUnityProject/Assets/Plugins/iVidCapPro/iVidCapProVideo.cs
@@ -87,9 +87,13 @@ public class iVidCapProVideo : MonoBehaviour {

// A local reference to the target rendertexture.
private RenderTexture rt = null;
// The rectangle that defines the viewport to be captured to the rendertexture.

// The rectangle that defines the custom recording area.
private Rect captureRect;

// The material which is used to resample the screen texture
// to the size of the custom recording area.
private Material blitMat;

// Whether or not recording from this camera is currently in progress.
private bool isRecording = false;
@@ -108,31 +112,33 @@ public void Awake () {
" and use the iVidCapPro SetCustomRenderTexture method instead. Failure to do so may " +
"result in a large framerate penalty.");
}

blitMat = new Material( Shader.Find("Custom/ividcappro") );
}

/// <summary>
/// Set the capture viewport of the camera on the rendertexture.
/// Ordinarily you don't need to call this, as it is set automatically
/// at the start of each recording session. If, however, you change
/// the viewport of the camera during the recording session, you need
/// to call this function each time the camera viewport is updated.
/// </summary>
public void SetCaptureViewport() {


Rect cameraRect = videoCam.rect;

captureRect.x = cameraRect.x * rt.width;
captureRect.y = cameraRect.y * rt.height;
captureRect.width = cameraRect.width * rt.width;
captureRect.height = cameraRect.height * rt.height;

if (isDedicated) {
// Set the aspect ratio of the camera to match the render texture.
videoCam.aspect = ((float)rt.width)/((float)rt.height);
}

/* ------------------------------------------------------------------------
-- SetCaptureRect --

Use this function when you need to record only a portion of the screen.
The function takes a rect in pixel screen coordinates.
------------------------------------------------------------------------ */
public void SetCaptureRect(Rect rect) {
captureRect.x = rect.x / (float)Screen.width;
captureRect.y = rect.y / (float)Screen.height;
captureRect.width = rect.width / (float)Screen.width;
captureRect.height = rect.height / (float)Screen.height;
}


/* ------------------------------------------------------------------------
-- SetCaptureRectWithNormalisedScreenCoordinates --

Use this function when you need to record only a portion of the screen.
The function takes a rect in normalised screen coordinates.
------------------------------------------------------------------------ */
public void SetCaptureRectWithNormalisedScreenCoordinates(Rect rect) {
captureRect = rect;
}

/* ------------------------------------------------------------------------
-- SetRenderTexture --

@@ -192,18 +198,27 @@ private void OnRenderImage (RenderTexture source, RenderTexture destination) {
// 08-Nov-2014 Nope - It's necessary after all. It's required for the case when
// there are multiple recording cameras.
RenderTexture.active = rt;

Rect rectZero = new Rect ();
bool bCustomCaptureRect = (captureRect.Equals (rectZero) == false);
if (bCustomCaptureRect) {

float cornerX1 = captureRect.x;
float cornerY1 = captureRect.y;
float cornerX2 = captureRect.x + captureRect.width;
float cornerY2 = captureRect.y + captureRect.height;

blitMat.SetFloat ("_CornerX1", cornerX1);
blitMat.SetFloat ("_CornerY1", cornerY1);
blitMat.SetFloat ("_CornerX2", cornerX2);
blitMat.SetFloat ("_CornerY2", cornerY2);

Graphics.Blit (source, rt, blitMat);

// We want to honor the size and location on the screen of the camera rendering
// rectangle. These GL routines allow us to restrict the rendering viewport to
// be that of the camera when we do the blit.
GL.PushMatrix();
GL.LoadPixelMatrix();
GL.Viewport(captureRect);

Graphics.Blit (source, rt);

// Restore the modelview and projection matrices.
GL.PopMatrix();
} else {

Graphics.Blit (source, rt);
}

// 17-Aug-2014 This appears to be unnecessary.
// 08-Nov-2014 Nope - It's necessary after all. See above.
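
As a reading aid (not part of this PR): the two new setters differ only in the coordinate space of their argument. SetCaptureRect divides a pixel-space rect by the screen size, while SetCaptureRectWithNormalisedScreenCoordinates stores the rect as-is. A small standalone sketch of that conversion, using a hypothetical helper name:

using UnityEngine;

// Hypothetical helper illustrating the pixel-to-normalised conversion that
// SetCaptureRect performs before storing captureRect.
public static class CaptureRectMath {

    public static Rect ToNormalised (Rect pixelRect) {
        return new Rect (
            pixelRect.x / Screen.width,
            pixelRect.y / Screen.height,
            pixelRect.width / Screen.width,
            pixelRect.height / Screen.height);
    }
}

With that helper, capture.SetCaptureRect(r) behaves like capture.SetCaptureRectWithNormalisedScreenCoordinates(CaptureRectMath.ToNormalised(r)); if neither setter is called, captureRect stays at its default all-zero value and OnRenderImage keeps the original full-frame blit path.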
52 changes: 52 additions & 0 deletions iVidCapProUnityProject/Assets/Plugins/iVidCapPro/ividcappro.shader
@@ -0,0 +1,52 @@

Shader "Custom/ividcappro" {

Properties {
_MainTex ("", 2D) = "white" {}
_CornerX1 ("CornerX1", Range(0,1)) = 0.0
_CornerX2 ("CornerX2", Range(0,1)) = 1.0
_CornerY1 ("CornerY1", Range(0,1)) = 0.0
_CornerY2 ("CornerY2", Range(0,1)) = 0.5
}

SubShader {

ZTest Always Cull Off ZWrite Off Fog { Mode Off } //Rendering settings

Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
// "UnityCG.cginc" provides the appdata_img struct used below

struct v2f {
float4 pos : POSITION;
half2 uv : TEXCOORD0;
};

v2f vert (appdata_img v){
v2f o;
o.pos = UnityObjectToClipPos (v.vertex);
o.uv = MultiplyUV (UNITY_MATRIX_TEXTURE0, v.texcoord.xy);
return o;
}

sampler2D _MainTex; // Declaring the property again inside the Pass is necessary so the CG program can use it
float _CornerX1;
float _CornerX2;
float _CornerY1;
float _CornerY2;

fixed4 frag (v2f i) : COLOR {

float tx = lerp(_CornerX1, _CornerX2, i.uv.x);
float ty = lerp(1.0 - _CornerY1, 1.0 - _CornerY2, 1.0 - i.uv.y);
fixed4 col = tex2D(_MainTex, fixed2(tx, ty));
return col;
}
ENDCG
}
}
FallBack "Diffuse"
}
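
For reference (not part of this PR), the fragment shader above remaps the output UVs so that the full rendertexture samples only the [_CornerX1, _CornerX2] x [_CornerY1, _CornerY2] sub-rectangle of the source, flipping the vertical axis. A CPU-side sketch of the same mapping, with hypothetical names:

using UnityEngine;

// Hypothetical CPU-side equivalent of the frag() UV remap in Custom/ividcappro.
public static class CaptureUvRemap {

    public static Vector2 Remap (Vector2 uv, float cornerX1, float cornerY1,
                                 float cornerX2, float cornerY2) {
        float tx = Mathf.Lerp (cornerX1, cornerX2, uv.x);
        float ty = Mathf.Lerp (1.0f - cornerY1, 1.0f - cornerY2, 1.0f - uv.y);
        return new Vector2 (tx, ty);
    }
}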