20160503

PlayOn Stream Recorder Ad Removal

I have been enjoying using PlayOn to record video streams for my Plex server to host, then syncing them to my mobile devices so I can watch on the bus or when I have some down time, without using my mobile data.

I was a little annoyed that I needed to use the PlayOn player to skip the ads, since the PlayOn app doesn't let you store the files locally.  To fix this I created a PowerShell script that uses ffmpeg to remove the chapters in the MP4 video files that are marked as "Advertisement".  I also trim the first few and last few seconds of the video to remove the annoying PlayOn logo.

It is not perfect, especially for HBO; they have been adding lots of previews/trailers for their content that PlayOn doesn't recognize as ads, but it does a pretty good job on most recordings.

Here is the script; it's a little sloppy, but it gets the job done.
#
# Script.ps1
# Splits PlayOn recordings on their chapter markers, drops the chapters titled
# "Advertisement", trims the PlayOn logo, and writes the result to the Plex library.
#
$InputBasePath = "E:\Video\PlayOnRecordings\"
$OutputBasePath = "S:\Video\PlayOnRecordings\"
$vidsToConvert = Get-ChildItem "$InputBasePath*.mp4" -Recurse
#$vidsToConvert = Get-ChildItem "E:\Video\PlayOnRecordings\Xfinity\*.mp4"

foreach ($vid in $vidsToConvert) {
    $TARGETFILEPATH = $vid.FullName.Replace($InputBasePath,$OutputBasePath).Replace("PlayOnRecordings","TV Shows")
    $TARGETDIR = $TARGETFILEPATH.Replace($vid.Name,"")

    if(!(Test-Path -Path $TARGETFILEPATH)){
        Write-Output $vid.FullName

        # Dump the chapter list with ffprobe and read it back in as CSV.
        New-Item $env:TEMP\MP4ChapterInfo.txt -type file -force
        ffprobe -loglevel panic $vid -show_chapters -sexagesimal -print_format csv 1> $env:TEMP\MP4ChapterInfo.txt
        $csvVidChapters = Import-Csv $env:TEMP\MP4ChapterInfo.txt -Header @("ObjName","Index","Fraction","StartMilliseconds","StartTime","EndMilliseconds","EndTime","ChapterTitle")

        $firstVideo = 1
        $lineCount = 1
        Write-Output "Chapter Lines: "$csvVidChapters.length

        # Split the recording into one temp file per chapter; the file name carries the
        # chapter title ("Video" or "Advertisement") so the ads can be skipped later.
        foreach ($line in $csvVidChapters){
            $OutputFile = ""
            If ([convert]::ToInt32($line.Index) -lt 10) {
                $OutputFile = ($env:TEMP + "\temp_" + $line.ChapterTitle + "_0" + $line.Index + $vid.Extension)
            }
            Else{
                $OutputFile = ($env:TEMP + "\temp_" + $line.ChapterTitle + "_" + $line.Index + $vid.Extension)
            }

            if($firstVideo -eq 1){
                Write-Output "Split FIRST Chapter: "$OutputFile
                #$newStartTime = ([TimeSpan]::Parse($line.StartTime)).TotalSeconds
                #$newStartTime = $newStartTime + 5
                #$tsStart = [timespan]::fromseconds($newStartTime)
                #Write-Output "First File, Trim Playon Tag. "("{0:hh\:mm\:ss\,fff}" -f $tsStart)
                #$newStartTime = ("{0:hh\:mm\:ss\,fff}" -f $tsStart) -replace ",","."
                ffmpeg -loglevel panic -i $vid -ss $line.StartTime -to $line.EndTime -async 1 -acodec copy -vcodec copy $OutputFile
                $firstVideo = 0
            }
            else{
                Write-Output "Split Chapter: "$OutputFile
                if($lineCount -eq $csvVidChapters.length){
                    # Last chapter: pull the end time in 10 seconds to trim the PlayOn logo.
                    $newEndTime = ([TimeSpan]::Parse($line.EndTime)).TotalSeconds
                    $newEndTime = $newEndTime - 10
                    $tsEnd = [timespan]::fromseconds($newEndTime)
                    Write-Output "Last File, Trim Playon Tag. "("{0:hh\:mm\:ss\,fff}" -f $tsEnd)
                    $newEndTime = ("{0:hh\:mm\:ss\,fff}" -f $tsEnd) -replace ",","."
                    ffmpeg -loglevel panic -i $vid -ss $line.StartTime -to $newEndTime -async 1 -acodec copy -vcodec copy $OutputFile
                }
                else{
                    ffmpeg -loglevel panic -i $vid -ss $line.StartTime -to $line.EndTime -async 1 -acodec copy -vcodec copy $OutputFile
                }
            }
            $lineCount++
        }
        Remove-Item $env:TEMP\MP4ChapterInfo.txt

        # Only the chapters titled "Video" are recombined; the "Advertisement" chapters are left behind.
        $vidsToCombine = Get-ChildItem $env:TEMP\temp_Video*.mp4

        $count = 0
        New-Item $env:TEMP\MP4ConcatList.txt -type file -force
        foreach ($vid_Video in $vidsToCombine) {
            Write-Output $vid_Video.Length

            $VidLength = ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 -sexagesimal $vid_Video
            Write-Output "Section Length: "([TimeSpan]::Parse($VidLength)).TotalSeconds

            # Skip very short "Video" segments (30 seconds or less).
            #If ($vid_Video.Length -gt 2000000){
            If (([TimeSpan]::Parse($VidLength)).TotalSeconds -gt 30){
                Write-Output "Add Concat: "$vid_Video.FullName
                "file '" + $vid_Video.FullName + "'" | Out-File $env:TEMP\MP4ConcatList.txt -Append -encoding default
                $count++
            }
            else{
                Write-Output "Skip Video Concat: "$vid_Video.FullName
            }
            Write-Output $count
        }

        if(!(Test-Path -Path $TARGETDIR )){
            Write-Output "Directory does not exist - CREATE DIRECTORY: "$TARGETDIR
            New-Item -ItemType directory -Path $TARGETDIR
        }

        # Debug output.
        !(Test-Path -Path $TARGETFILEPATH)
        $TARGETFILEPATH

        If(!(Test-Path -Path $TARGETFILEPATH)){
            Write-Output "Count: "$count
            If ($count -gt 0){
                Write-Output "Build Vid from Chapters..."
                # Stitch the kept chapters back together without re-encoding.
                # (Newer ffmpeg builds may also need -safe 0 here, since the list uses absolute paths.)
                ffmpeg -loglevel panic -f concat -i $env:TEMP\MP4ConcatList.txt -c copy $TARGETFILEPATH
            }
            else{
                # No usable chapter markers: copy the whole recording, trimming 5 seconds from
                # the start and 10 seconds from the end to drop the PlayOn logo.
                Write-Output "No Chapters - Copy Vid to: "$TARGETFILEPATH

                $newStartTime = 0 #([TimeSpan]::Parse("0:00:00:00,000")).TotalSeconds
                $newStartTime = $newStartTime + 5
                $tsStart = [timespan]::fromseconds($newStartTime)
                Write-Output "Trim Playon Tag. "("{0:hh\:mm\:ss\,fff}" -f $tsStart)
                $newStartTime = ("{0:hh\:mm\:ss\,fff}" -f $tsStart) -replace ",","."

                $VidLength = ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 -sexagesimal $vid
                Write-Output $VidLength

                $newEndTime = ([TimeSpan]::Parse($VidLength)).TotalSeconds
                $newEndTime = $newEndTime - 10
                $tsEnd = [timespan]::fromseconds($newEndTime)
                Write-Output "Last File, Trim Playon Tag. "("{0:hh\:mm\:ss\,fff}" -f $tsEnd)
                $newEndTime = ("{0:hh\:mm\:ss\,fff}" -f $tsEnd) -replace ",","."

                ffmpeg -loglevel panic -i $vid -ss $newStartTime -to $newEndTime -async 1 -acodec copy -vcodec copy $TARGETFILEPATH
                #Copy-Item $vid -Destination $TARGETFILEPATH
            }
        }

        # Clean up the temp chapter files and concat list.
        ###Remove-Item $strRemoval -include $vid.Extension
        Remove-Item $env:TEMP\MP4ConcatList.txt
        Remove-Item $env:TEMP\temp_Video_*.mp4
        Remove-Item $env:TEMP\temp_Advertisement_*.mp4
    }
    else{
        Write-Output "Vid Exists Skip: "$vid.FullName
    }
}

20140825

Detecting changes to data in a row

I am working on a project where I need to quickly tell whether the data contained in a row has changed.  After playing around with HASHBYTES unsatisfactorily, I did some searches and came across this post: http://sqlserverplanet.com/data-warehouse/detecting-row-level-changes

This appears to be a decent method of discovering changes without the overhead of doing a full byte-by-byte compare.
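
Here's a minimal sketch of the general idea (not necessarily the exact query from that post), using EXCEPT in an UPDATE's WHERE clause so only rows whose values actually differ get touched; the table and column names are made up for illustration:

-- Only update target rows whose compared columns differ from the staging row.
UPDATE tgt
SET    tgt.CustomerName = src.CustomerName,
       tgt.Balance      = src.Balance
FROM   dbo.Customer AS tgt
       INNER JOIN staging.Customer AS src
           ON src.CustomerID = tgt.CustomerID
WHERE  EXISTS (SELECT src.CustomerName, src.Balance
               EXCEPT
               SELECT tgt.CustomerName, tgt.Balance);

A nice side effect is that EXCEPT treats two NULLs as equal, so you don't need a pile of ISNULL wrappers to avoid false positives on NULL columns.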

20140213

VS2012 BI Installer error - Rule "Same architecture installation" failed.

I was trying to upgrade my Visual Studio 2012 to allow editing Business Intelligence projects by installing SQL Server Data Tools (thread on that topic).  I downloaded this installer for the SQL Server Data Tools, chose an existing SQL Server 2012 instance to install to, and got this error: 'Rule "Same architecture installation" failed.'

After some searching it appears that you need to choose to perform a "new installation".

Here are the links where I found this information:
http://social.msdn.microsoft.com/Forums/sqlserver/en-US/4ae33edc-c1ad-44df-bc74-ffb30ea91287/64-bit-microsoft-sql-server-data-tools-business-intelligence-for-visual-studio-2012-?forum=sqlreportingservices

http://blogs.msdn.com/b/jenss/archive/2013/03/07/finally-the-family-united-sql-server-bi-project-templates-now-available-in-visual-studio-2012.aspx

20140123

List Tables from ODBC

I wanted to see the available catalogs on a linked server, but the catalogs branch was empty, so I thought I would try to find a query that would show them.

I'd done this before with TSQL on SQL Server:
SELECT * FROM sys.objects
or
SELECT * FROM sysobjects

But I had never done it for an ODBC linked server (this one happens to be Oracle).
After some conversations and searching, I came up with this:
SELECT * FROM USER_TAB_PRIVS;
and for the roles granted to the current user:
SELECT * FROM USER_ROLE_PRIVS;

If you are also using a Linked Server, you will need to use the OPENQUERY function:
SELECT * FROM OPENQUERY([Linked Server Name], 'SELECT * FROM USER_TAB_PRIVS;')

You may even need to build the text and run it using sp_executesql:
DECLARE @Query nvarchar(max) = 'SELECT * FROM OPENQUERY([Linked Server Name], ''SELECT * FROM USER_TAB_PRIVS;'')'
PRINT @Query
exec sp_executesql @Query;

20140122

TSQL Temp Tables # vs ##

Using the # prefix for a temporary table creates that table in tempdb.  The ## prefix does the same, but makes the table global, so you can access it from other sessions and procedures.

Both persist until the session that created them ends (or until they are explicitly dropped), so it's good practice to DROP them when you're done to free up tempdb space.
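
A quick illustration of the difference (hypothetical table names):

-- Local temp table: only visible to the session/scope that created it.
CREATE TABLE #Work (ID int, Val varchar(50));

-- Global temp table: visible to every session until the creating session
-- disconnects and nothing else is referencing it.
CREATE TABLE ##SharedWork (ID int, Val varchar(50));

-- Drop them when you're done to free up tempdb space.
DROP TABLE #Work;
DROP TABLE ##SharedWork;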

More here.

20140121

Automate backup file maintenance with Python

I automated my disk backups, but I kept having to delete the extra backup files manually so I wouldn't run out of space.  I decided I needed to automate that task as well.

Add the following Python script to a scheduled task so it runs automatically.

Python script:
import os
import datetime
import fnmatch

dir_to_search = "[full path to backup directory]"
for dirpath, dirnames, filenames in os.walk(dir_to_search):
    intCountOfTotalBackups = len(fnmatch.filter(filenames, '[Backup name, same as the Macrium XML file]*-00-00.mrimg'))
    print "Total Backups: " + str(intCountOfTotalBackups)
    intCountOfBackupsToDelete = 0

    # count how many backups are more than a week (168 hours) old
    for file in fnmatch.filter(filenames, '[Backup name, same as the Macrium XML file]*-00-00.mrimg'):
        curpath = os.path.join(dirpath, file)
        file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(curpath))
        if datetime.datetime.now() - file_modified > datetime.timedelta(hours=168):
            intCountOfBackupsToDelete += 1

    print intCountOfBackupsToDelete
    # always keep at least a few backups, even if they are old.
    if intCountOfBackupsToDelete < (intCountOfTotalBackups - 4):
        print str(intCountOfBackupsToDelete) + ' < ' + str(intCountOfTotalBackups - 4)
        for file in fnmatch.filter(filenames, '[Backup name, same as the Macrium XML file]*-00-00.mrimg'):
            curpath = os.path.join(dirpath, file)
            file_modified = datetime.datetime.fromtimestamp(os.path.getmtime(curpath))
            if datetime.datetime.now() - file_modified > datetime.timedelta(hours=168):
                os.remove(curpath)
                print "remove " + curpath
#input("Press Enter to continue...")

Automate disk backups with Macrium Reflect (free)

I've had really good luck with Macrium Reflect Free.  It is a disk imaging program with most of the features of the major commercial players, and the free version covers everything I need.

With a little looking around I figured out how to automate backups with the Windows Task Scheduler.

You have to run an initial backup and save the backup definition so you can run it again later; it saves as an XML file.

Now you just have Task Scheduler run a batch file containing the following:
call "C:\Program Files\Macrium\Reflect\Reflect.exe" -e -w "[path to Macrium saved backup document]\[backup name].xml"

This will run the backup on whatever schedule you have set.

Create a Podcast RSS XML feed file from a directory using Python

So I had a bunch of items in folders that I wanted to access from my phone without having to store them on it, and I wanted to use a podcasting program (Downcast for iOS) so I wouldn't have to keep track of which ones I had already listened to.  This is really useful for shows that post a single episode to their feed for a limited time, then remove it.  It was also a great opportunity to learn how to create an RSS feed.

I wanted something that I would be able to port to virtually any host machine, so I decided to use Python to create the RSS feed; that way I can easily modify and run the process without having to run builds or update other files.  For ease of running it, I use Task Scheduler to kick off a batch file which calls the Python RSS XML generator.

I have a little netbook that I use for a home server; it's nice and low power but does most things I need.  I have set up XAMPP as a web host so I can access the RSS feed and audio files.  I have gPodder download the episodes locally, then every night I use Windows Task Scheduler to run a batch file that runs a Python script to build the RSS XML files.  Then in the morning Downcast checks for updates and all the new episodes show up, ready to play.

gPodder is really neat because it will also download YouTube video, so I include video podcasts for some of my favorite YouTube channels.  This way everything is downloaded locally and I don't burn through tons of cell data if I want to watch something while I'm out and about.

Anyway, here is the batch file:
[path to Python script]\createRSSFeed.py "[path to directory containing files (mp3,mp4)]" "[path to outputfile]\[output file name].xml" "[Feed Name]" "[Feed Description]" "[path to gPodder download directory]" "[relative web path]" true[use file timestamp]

And here is the Python script (adapted from stuffaboutcode.com):
# update environment to handle Unicode
PYTHONIOENCODING="utf-8"

# import libraries
import os
import os.path
import sys
import datetime
import time
import re
import fnmatch
import urllib2

# import constants from stat library
from stat import * # ST_SIZE ST_MTIME

 # import ID3 tag reader
######from mutagen.id3 import ID3, ID3TimeStamp, TDRC
from time import strptime, strftime

# format date method
def formatDate(dt):
    return dt.strftime("%a, %d %b %Y %H:%M:%S +0000")


# get the item/@type based on file extension
def getItemType(fileExtension):
    if fileExtension == "aac" or fileExtension == "mp3" or fileExtension == "m4a":
         mediaType = "audio/mpeg"
    elif fileExtension == "mp4" or fileExtension == "avi":
         mediaType = "video/mpeg"
    else:
         mediaType = "audio/mpeg"
    return mediaType


# constants
# the podcast name
rssTitle = sys.argv[3]
# the podcast description
rssDescription = sys.argv[4]
# the url where the podcast items will be hosted
rssSiteURL = "http://[domain address]"
# the url of the folder where the items will be stored
strURL1 =  sys.argv[1].replace(sys.argv[5],sys.argv[6]).replace("\\","/")
rssItemURL = rssSiteURL + urllib2.quote(strURL1.encode("utf8"))
# the url to the podcast html file
rssLink = rssSiteURL #+ ""
# url to the podcast image
strURL2 = strURL1 + "/folder.jpg"
rssImageUrl = rssSiteURL + urllib2.quote(strURL2.encode("utf8"))
# the time to live (in minutes)
rssTtl = "60"
# contact details of the web master
rssWebMaster = "[email address]"


#record datetime started
now = datetime.datetime.now()


# command line options
#    - python createRSFeed.py /path/to/podcast/files /path/to/output/rss/gPodder.xml
# directory passed in
rootdir = sys.argv[1]
# output RSS filename
outputFilename = sys.argv[2]


# Main program

# open rss file
outputFile = open(outputFilename, "w")


# write rss header
outputFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
outputFile.write("<rss version=\"2.0\">\n")
outputFile.write("<channel>\n")
outputFile.write("<title>" + rssTitle + "</title>\n")
outputFile.write("<description>" + rssDescription + "</description>\n")
outputFile.write("<link>" + rssLink + "</link>\n")
outputFile.write("<ttl>" + rssTtl + "</ttl>\n")
outputFile.write("<image><url>" + rssImageUrl + "</url><title>" + rssTitle + "</title><link>" + rssLink + "</link></image>\n")
outputFile.write("<copyright>CC 2014</copyright>\n")
outputFile.write("<lastBuildDate>" + formatDate(now) + "</lastBuildDate>\n")
outputFile.write("<pubDate>" + formatDate(now) + "</pubDate>\n")
outputFile.write("<webMaster>" + rssWebMaster + "</webMaster>\n")


# walk through all files and subfolders
includes = ['*.mp3','*.mp4','*.m4a']
includes = r'|'.join([fnmatch.translate(x) for x in includes])
for path, subFolders, files in os.walk(rootdir):
    # exclude/include files
    files = [os.path.join(path, f) for f in files]
    #files = [f for f in files if not re.match(excludes, f)]
    files = [f for f in files if re.match(includes, f)]
    files.sort(key=lambda x: os.stat(os.path.join(path, x)).st_ctime)
    for file in files:
        #if fnmatch.fnmatch(path,includes):
        # split the file based on "." we use the first part as the title and the extension to work out the media type
        fileNameBits = file.split(".")
        # get the full path of the file
        fullPath = os.path.join(path, file)
        # get the stats for the file
        fileStat = os.stat(fullPath)
        # find the path relative to the starting folder, e.g. /subFolder/file
        relativePath = fullPath[len(rootdir):]
        relativePath = relativePath.replace(sys.argv[5],sys.argv[6]).replace("\\","/")
        relativePath = urllib2.quote(relativePath.encode("utf8"))

        # Extract ID3 info
        #title
        #  audio = ID3(fullPath)
        #     fileTitle = audio["TIT2"].text[0]
        #date
        #  datePos = fileTitle.find(":")
        #  fileDate = fileTitle[(datePos+2):]
        #  fileDate = time.strptime(fileDate, "%B %d, %Y")
        #correct date format in the file's ID3 tag
        #  fileTS = ID3TimeStamp(time.strftime("%Y-%m-%d", fileDate))
        #  audio['TDRC'] = TDRC(0, [fileTS])
        #description
        #  fileDesc = audio["COMM::'eng'"].text[0]
        #  fileDesc = fileDesc.encode('ascii', 'ignore') #converts everything to ASCII prior to writing out
        #  audio.save()

        # write rss item
        outputFile.write("    <item>\n")
        outputFile.write("        <title>" + fileNameBits[0].replace(sys.argv[1]+ "\\","").replace("_", " ").replace("'","&apos;") + "</title>\n")
        #outputFile.write("        <description>A description</description>\n")
        #outputFile.write("        <description>" + fileDesc + "</description>\n")
        outputFile.write("        <link>" + rssItemURL + relativePath + "</link>\n")
        outputFile.write("        <guid>" + rssItemURL + relativePath + "</guid>\n")
        if sys.argv[7] == "true":
            print(sys.argv[7])
            outputFile.write("        <pubDate>" + formatDate(datetime.datetime.fromtimestamp(fileStat[ST_MTIME])) + "</pubDate>\n")
        else: outputFile.write("        <pubDate>" + formatDate(now) + "</pubDate>\n")
        outputFile.write("        <enclosure url=\"" + rssItemURL + relativePath + "\" length=\"" + str(fileStat[ST_SIZE]) + "\" type=\"" + getItemType(fileNameBits[-1]) + "\" />\n")
        outputFile.write("    </item>\n")
     
# write rss footer
outputFile.write("</channel>\n")
outputFile.write("</rss>")
outputFile.close()
print "complete"

Record an MP3 stream with VLC

I wanted to archive an audio stream to add to my personal podcast feed, essentially to time-shift the broadcast for when I wanted to listen to it.

You can use VLC to record the stream.  I use Windows Task Scheduler to start a batch file that launches VLC and records the stream to a file with the date in its name.

Batch File:
@echo off
rem
for /f "tokens=2-4 delims=/ " %%a in ('date /T') do set year=%%c
for /f "tokens=2-4 delims=/ " %%a in ('date /T') do set month=%%a
for /f "tokens=2-4 delims=/ " %%a in ('date /T') do set day=%%b
set TODAY=%year%%month%%day%
call "C:\Program Files (x86)\VideoLAN\VLC\vlc.exe" --demux dump --demuxdump-file "[Path]\[show name]\[show name]_%year%%month%%day%.mp3" "[stream path]"

20121113

Any CPU

Could not load file or assembly '<name>' or one of its dependencies. An attempt was made to load a program with an incorrect format.

I ran into the above error and found this post.  I changed the platform target from x86 to Any CPU, and it worked!

20121004

Microsoft SQL Reporting - Repeating headers on new pages

This is something I've battled for years.

I finally figured it out this week.

Select your tablix and go to the right of the column groups, click the down arrow, and check "Advanced Mode".  Then select the static member under "Row Groups"; if there is more than one, look for the one that selects the first cell in your header row ("WO #" in my case).  Check that the Properties window (hotkey F4) shows "KeepWithGroup" as "After"; that means you have the proper object selected.  Now change "RepeatOnNewPage" to "True" and do the same for "FixedData".

That should do it.  It seems simple once you know, but man was it hard to figure out.  I hope this helps.



20110224

IIS Enable 32 bit applications "HTTP Error 500.19 - Internal Server Error"

http://forums.iis.net/t/1149768.aspx

"You cannot get rid of it for that site - compression schemes are registered globally - to get rid of the scheme altogether, run below
%windir%\system32\inetsrv\appcmd.exe set config -section:system.webServer/httpCompression /-[name='xpress']"


20100603

Restore Database and "Single User" mode

I recently restored a database and it gave an error after the restore about indexes and rowguid columns, but the database was there, so I'm going with it.

But it showed up as "databasename (single user)" and I couldn't use it because "there is already a user logged into this database".

So I had to change it; this link was very helpful:
http://remidian.com/mssql/remove-sql-server-database-from-single-user-mode.html

SELECT spid
FROM sysprocesses p inner join sysdatabases d on p.dbid = d.dbid
WHERE d.name = 'DatabaseName';

KILL <spid returned above>

EXEC sp_dboption 'DatabaseName', 'single user', 'FALSE';
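
Note that sp_dboption was removed in SQL Server 2012, so on newer versions the equivalent is ALTER DATABASE; something like:

ALTER DATABASE DatabaseName SET MULTI_USER WITH ROLLBACK IMMEDIATE;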



20100316

Sending Email With SQLServer 2008

It is beneficial to use the new "Database Mail" section in SQL Server Management Studio; you can set up profiles, accounts, and so on there.
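
If you would rather script the setup than click through the wizard, the msdb sysmail procedures can create the account and profile; here is a rough sketch (the account name, profile name, addresses, and mail server are placeholders):

--Create a mail account, a profile, and tie them together.
EXEC msdb.dbo.sysmail_add_account_sp
     @account_name    = 'DefaultAccount'
    ,@email_address   = 'Address@EmailAddress.com'
    ,@mailserver_name = 'smtp.EmailAddress.com';

EXEC msdb.dbo.sysmail_add_profile_sp
     @profile_name = 'ProfileName';

EXEC msdb.dbo.sysmail_add_profileaccount_sp
     @profile_name    = 'ProfileName'
    ,@account_name    = 'DefaultAccount'
    ,@sequence_number = 1;

That creates the 'ProfileName' profile referenced by sp_send_dbmail below.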

--Start the mail watcher, looks for mail then sends it when it has time.
EXEC msdb.dbo.sysmail_start_sp;

--Queue the Message.
EXEC   msdb.dbo.sp_send_dbmail
     @profile_name = 'ProfileName'
     ,@recipients       = 'Address@EmailAddress.com'
     ,@subject          = 'Subject'
     ,@body              = 'Body'
     ,@body_format        = 'HTML' --Or Text
     --,@query                = @UpdateMessageQuery
     --@file_attachments = @file_attachments       

20100129

Processing QBXML In SQL Server

I've been working with QuickBooks again, and this time I need to parse some QBXML in the database so I can do some data synchronization on a schedule.

SQL Server 2005+ has some nice XML tools that make things a lot easier.  I was having trouble getting things to show up, but eventually found some pretty basic examples.

I'm parsing an Invoice Query response.

Here is what I came up with:
-- variable to hold document reference
DECLARE @docHandle int

-- variable to hold actual XML
DECLARE @xmlDocument nvarchar(max) -- or xml type

-- Fill XML variable
SELECT TOP 1 @xmlDocument = QBXML FROM QBBridge ORDER BY QBBridgeID DESC

-- Use built in Procedure to process the XML into Data
EXEC sp_xml_preparedocument @docHandle OUTPUT, @xmlDocument;

    DECLARE @ParentID int
    DECLARE qbXMLCursor CURSOR FOR
        -- Get All InvoiceRet Objects.
        SELECT ID FROM OPENXML(@docHandle, N'/QBXML/QBXMLMsgsRs') WHERE NodeType = 1 AND LocalName = 'InvoiceRet'

    -- cycle through InvoiceRets and compile Sub Data.
    OPEN qbXMLCursor
    FETCH NEXT FROM qbXMLCursor INTO @ParentID

   
    WHILE @@FETCH_STATUS = 0
    BEGIN

        -- Use Common Table Expression to compile values to names of parent nodes
        -- I named my table columns <ParentName>.<ParentName>.<Name> so I'm sitting pretty for an insert from this.
        WITH InvoiceValues ([LocalName], [Value], [Level], [ID]) AS
        (
            SELECT    LocalName,
                    [text],
                    1 AS [Level],
                    [ID]
            FROM    OPENXML(@docHandle, N'/QBXML/QBXMLMsgsRs')
            WHERE ParentID = @ParentID
            UNION ALL
            SELECT    CASE WHEN SubValues.LocalName = '#text' THEN
                        InvoiceValues.LocalName
                    ELSE
                     InvoiceValues.LocalName + '.' + SubValues.LocalName
                     END As LocalName,
                    [text],
                    InvoiceValues.[Level] + 1 AS [Level],
                    SubValues.[ID]
            FROM    InvoiceValues
                    INNER JOIN OPENXML(@docHandle, N'/QBXML/QBXMLMsgsRs') As SubValues
                    ON InvoiceValues.ID = SubValues.ParentID
        )
        SELECT    [LocalName], [Value]
        FROM    InvoiceValues
        WHERE Value IS NOT NULL
           
        FETCH NEXT FROM qbXMLCursor INTO @ParentID
    END
    CLOSE qbXMLCursor
    DEALLOCATE qbXMLCursor

EXEC sp_xml_removedocument @docHandle