import groovy.util.XmlParser
import org.serviio.library.metadata.*
import org.serviio.library.online.*
import groovy.json.JsonSlurper

 /********************************************************************
 * foodnetwork.com plugin for Serviio (US ONLY)
 * Based on the Plex plugin
 *
 * Version:
 *    V1: - Dec 2, 2013  - Initial Release by Leonardo
 *	  V2: - Dec 10, 2013 - Accept a wider range of valid URLs
 * Must be installed as a WebResource
 * Valid URL: http://www.foodnetwork.com/
 
 ********************************************************************/
  


class FoodNetwork extends WebResourceUrlExtractor {

    // Accepts plain/ www. / http:// variants of foodnetwork.com, with an optional path.
    // Fix: the dot before "com" is now escaped; previously the regex used a bare '.',
    // which matched any character (e.g. "foodnetworkXcom").
    final VALID_FEED_URL = '^(?:http://)?(?:www\\.)?foodnetwork\\.com(/.*)?'

    // Landing page that lists the full-episode channels; the user-supplied URL is ignored.
    final EPISODE_LIST = 'http://www.foodnetwork.com/food-network-full-episodes/videos/index.html'

    // Per-channel video feed. First %s = channel id, second %s = max item count.
    final JSON_URL = 'http://www.foodnetwork.com/food/feeds/channel-video/0,,FOOD_CHANNEL_%s_1_%s_RA,00.json'

    /** Plugin version reported to Serviio. */
    int getVersion() {
        return 2
    }

    /** Timeout (seconds) Serviio allows extractItems() to run. */
    int getExtractItemsTimeout() {
        return 30
    }

    /**
     * Logs an error and returns a one-item container carrying a sentinel
     * "http://error" URL, so the failure is visible in the Serviio UI.
     *
     * @param e human-readable error message
     * @return  container titled "Error" with a single placeholder item
     */
    WebResourceContainer errorHandlerWRC(String e) {
        List<WebResourceItem> items = []
        println e
        log(e)
        items << new WebResourceItem(title: e, additionalInfo: ['url': 'http://error', 'thumbnailUrl': 'http://fake.jpg'])
        return new WebResourceContainer(title: "Error", items: items)
    }

    /** Logs an error to stdout and the Serviio log without producing a container. */
    void errorHandler(String e) {
        println e
        log(e)
    }

    /** Name shown in the Serviio plugin list. */
    String getExtractorName() {
        return 'FoodNetwork.com'
    }

    /** True when the feed URL looks like a foodnetwork.com address. */
    boolean extractorMatches(URL feedUrl) {
        return feedUrl ==~ VALID_FEED_URL
    }

    /**
     * Scrapes the full-episodes page for channel ids, fetches each channel's
     * JSON feed, and converts every video into a WebResourceItem whose
     * additionalInfo carries [stream_source, playpath] under "url".
     *
     * @param resourceUrl        ignored; EPISODE_LIST is always used
     * @param maxItemsToRetrieve per-channel item cap; -1 (unlimited) is clamped to 100
     * @return container of all items found, or an error container when no
     *         channels are present on the page
     */
    WebResourceContainer extractItems(URL resourceUrl, int maxItemsToRetrieve) {

        resourceUrl = new URL(EPISODE_LIST)

        List<WebResourceItem> items = []

        def pageContent = resourceUrl.getText()

        // Each channel is announced in the markup as <li data-channel="NNN">.
        def jsMatcher = pageContent =~ "<li data-channel=\"([0-9]*)\">"

        if (jsMatcher.count <= 0) {
            return errorHandlerWRC("FoodNetwork: data channels not found")
        }

        // Clamp "unlimited" (-1) to 100 per channel. Hoisted out of the loop:
        // the value is loop-invariant, re-checking it per channel was redundant.
        if (maxItemsToRetrieve <= -1) {
            maxItemsToRetrieve = 100
        }

        for (j in 0..<jsMatcher.count) {

            def section = jsMatcher[j][1]
            def contentUrl = new URL(String.format(JSON_URL, section, maxItemsToRetrieve))

            def json
            try {
                // The feed is JSON wrapped in a JS assignment; strip the prefix first.
                json = new JsonSlurper().parseText(contentUrl.getText().replace('var snapTravelingLib = ', ''))
            } catch (FileNotFoundException e) {
                // Fix: previously the error container was built and discarded while the
                // loop fell through with a stale/null `json` (duplicate items or NPE).
                // Log it and skip this channel instead.
                errorHandler("FoodNetwork: JSON not found")
                continue
            }

            try {
                for (int i = 0; i < json[0].videos.size(); i++) {

                    def video = json[0].videos[i]

                    // Rewrite the WMS HTTP URL into an rtmp stream + playpath pair.
                    // The '&' inserted after "ondemand/" is the split point, so
                    // split('&') yields [stream_source, clip] for extractUrl().
                    def url = video.videoURL
                    url = url.replace('http://wms', 'rtmp://flash')
                    url = url.replace('scrippsnetworks.com', 'scrippsnetworks.com/ondemand')
                    url = url.replace('ondemand/', 'ondemand/&')
                    url = url.replace('.wmv', '')
                    url = url.split('&')

                    Map<String, String> additionalInfo = new HashMap<String, String>()
                    additionalInfo.put("url", url)
                    additionalInfo.put("thumbnailUrl", video.thumbnailURL)

                    // NOTE(review): feed may expose an airdate field worth adding here.
                    items << new WebResourceItem(title: video.label, additionalInfo: additionalInfo)
                }
            } catch (e) {
                // Fix: the original concatenated with '&' (bitwise AND in Groovy),
                // which itself threw inside this catch block; use '+' instead.
                errorHandler("FoodNetwork: Json Error parsing " + e)
            }
        }

        return new WebResourceContainer(title: "FoodNetwork", items: items)
    }

    /**
     * Turns a stored [stream_source, clip] pair into an rtmp content URL of the
     * form "<stream> playpath=<clip>". Items carrying the "http://error"
     * sentinel produce a dummy rtsp URL so the error title stays visible.
     *
     * @param item             item produced by extractItems()
     * @param requestedQuality ignored except as part of the cache key
     */
    ContentURLContainer extractUrl(WebResourceItem item, PreferredQuality requestedQuality) {

        // Error sentinel produced by errorHandlerWRC(): return a placeholder stream.
        if (item.additionalInfo.url.contains("http://error")) {
            def rtmpUrl = "rtsp://error"
            def secCode = "abcdefghi"
            def cacheKey = "http://lastitem_" + secCode
            return new ContentURLContainer(contentUrl: rtmpUrl, thumbnailUrl: item.additionalInfo.thumbnailUrl, cacheKey: cacheKey, expiresImmediately: false, live: true)
        }

        def stream_source = item.additionalInfo.url[0]
        def clip = item.additionalInfo.url[1]

        def contentUrl = "${stream_source} playpath=${clip}"
        def cacheKey = "FoodNetwork_${clip}_${requestedQuality}"

        return new ContentURLContainer(contentUrl: contentUrl, thumbnailUrl: item.additionalInfo.thumbnailUrl, expiresImmediately: true, cacheKey: cacheKey)
    }

    /** Manual smoke test: fetch one item per channel and print its resolved URL. */
    static void main(args) {
        FoodNetwork extractor = new FoodNetwork()
        URL url = new URL("http://foodnetwork.com")
        println extractor.extractorMatches(url)
        WebResourceContainer container = extractor.extractItems(url, 1)

        if (container) {
            container.getItems().each {
                ContentURLContainer result = extractor.extractUrl(it, PreferredQuality.MEDIUM)
                println result
            }
        }
    }
}